xref: /minix/minix/servers/vfs/device.c (revision 433d6423)
1 /* When a needed block is not in the cache, it must be fetched from the disk.
2  * Special character files also require I/O.  The routines for these are here.
3  *
4  * The entry points in this file are:
5  *   cdev_open:   open a character device
6  *   cdev_close:  close a character device
7  *   cdev_io:     initiate a read, write, or ioctl to a character device
8  *   cdev_select: initiate a select call on a device
9  *   cdev_cancel: cancel an I/O request, blocking until it has been cancelled
10  *   cdev_reply:  process the result of a character driver request
11  *   bdev_open:   open a block device
12  *   bdev_close:  close a block device
13  *   bdev_reply:  process the result of a block driver request
14  *   bdev_up:     a block driver has been mapped in
15  *   do_ioctl:    perform the IOCTL system call
16  */
17 
18 #include "fs.h"
19 #include <string.h>
20 #include <fcntl.h>
21 #include <assert.h>
22 #include <sys/stat.h>
23 #include <sys/ttycom.h>
24 #include <minix/callnr.h>
25 #include <minix/com.h>
26 #include <minix/endpoint.h>
27 #include <minix/ioctl.h>
28 #include <minix/u64.h>
29 #include "file.h"
30 #include "scratchpad.h"
31 #include "dmap.h"
32 #include <minix/vfsif.h>
33 #include "vnode.h"
34 #include "vmnt.h"
35 
36 static int cdev_opcl(int op, dev_t dev, int flags);
37 static int block_io(endpoint_t driver_e, message *mess_ptr);
38 static cp_grant_id_t make_grant(endpoint_t driver_e, endpoint_t user_e, int op,
39 	vir_bytes buf, unsigned long size);
40 
41 /*===========================================================================*
42  *				bdev_open				     *
43  *===========================================================================*/
44 int bdev_open(dev_t dev, int access)
45 {
46 /* Open a block device. */
47   devmajor_t major_dev;
48   devminor_t minor_dev;
49   message dev_mess;
50   int r;
51 
52   major_dev = major(dev);
53   minor_dev = minor(dev);
54   if (major_dev < 0 || major_dev >= NR_DEVICES) return ENXIO;
55   if (dmap[major_dev].dmap_driver == NONE) return ENXIO;
56 
57   memset(&dev_mess, 0, sizeof(dev_mess));
58   dev_mess.m_type = BDEV_OPEN;
59   dev_mess.m_lbdev_lblockdriver_msg.minor = minor_dev;
60   dev_mess.m_lbdev_lblockdriver_msg.access = 0;
61   if (access & R_BIT) dev_mess.m_lbdev_lblockdriver_msg.access |= BDEV_R_BIT;
62   if (access & W_BIT) dev_mess.m_lbdev_lblockdriver_msg.access |= BDEV_W_BIT;
63   dev_mess.m_lbdev_lblockdriver_msg.id = 0;
64 
65   /* Call the task. */
66   r = block_io(dmap[major_dev].dmap_driver, &dev_mess);
67   if (r != OK)
68 	return r;
69 
70   return dev_mess.m_lblockdriver_lbdev_reply.status;
71 }
72 
73 
74 /*===========================================================================*
75  *				bdev_close				     *
76  *===========================================================================*/
77 int bdev_close(dev_t dev)
78 {
79 /* Close a block device. */
80   devmajor_t major_dev;
81   devminor_t minor_dev;
82   message dev_mess;
83   int r;
84 
85   major_dev = major(dev);
86   minor_dev = minor(dev);
87   if (major_dev < 0 || major_dev >= NR_DEVICES) return ENXIO;
88   if (dmap[major_dev].dmap_driver == NONE) return ENXIO;
89 
90   memset(&dev_mess, 0, sizeof(dev_mess));
91   dev_mess.m_type = BDEV_CLOSE;
92   dev_mess.m_lbdev_lblockdriver_msg.minor = minor_dev;
93   dev_mess.m_lbdev_lblockdriver_msg.id = 0;
94 
95   r = block_io(dmap[major_dev].dmap_driver, &dev_mess);
96   if (r != OK)
97 	return r;
98 
99   return dev_mess.m_lblockdriver_lbdev_reply.status;
100 }
101 
102 
103 /*===========================================================================*
104  *				bdev_ioctl				     *
105  *===========================================================================*/
106 static int bdev_ioctl(dev_t dev, endpoint_t proc_e, unsigned long req,
107 	vir_bytes buf)
108 {
109 /* Perform an I/O control operation on a block device. */
110   struct dmap *dp;
111   cp_grant_id_t gid;
112   message dev_mess;
113   devmajor_t major_dev;
114   devminor_t minor_dev;
115   int r;
116 
117   major_dev = major(dev);
118   minor_dev = minor(dev);
119 
120   /* Determine task dmap. */
121   dp = &dmap[major_dev];
122   if (dp->dmap_driver == NONE) {
123 	printf("VFS: bdev_ioctl: no driver for major %d\n", major_dev);
124 	return(ENXIO);
125   }
126 
127   /* Set up a grant if necessary. */
128   gid = make_grant(dp->dmap_driver, proc_e, BDEV_IOCTL, buf, req);
129 
130   /* Set up the message passed to the task. */
131   memset(&dev_mess, 0, sizeof(dev_mess));
132 
133   dev_mess.m_type = BDEV_IOCTL;
134   dev_mess.m_lbdev_lblockdriver_msg.minor = minor_dev;
135   dev_mess.m_lbdev_lblockdriver_msg.request = req;
136   dev_mess.m_lbdev_lblockdriver_msg.grant = gid;
137   dev_mess.m_lbdev_lblockdriver_msg.user = proc_e;
138   dev_mess.m_lbdev_lblockdriver_msg.id = 0;
139 
140   /* Call the task. */
141   r = block_io(dp->dmap_driver, &dev_mess);
142 
143   /* Clean up. */
144   if (GRANT_VALID(gid)) cpf_revoke(gid);
145 
146   /* Return the result. */
147   if (r != OK)
148 	return(r);
149 
150   return(dev_mess.m_lblockdriver_lbdev_reply.status);
151 }
152 
153 
154 /*===========================================================================*
155  *				make_grant				     *
156  *===========================================================================*/
157 static cp_grant_id_t make_grant(endpoint_t driver_e, endpoint_t user_e, int op,
158 	vir_bytes buf, unsigned long bytes)
159 {
160 /* Create a magic grant for the given operation and buffer. */
161   cp_grant_id_t gid;
162   int access;
163   size_t size;
164 
165   switch (op) {
166   case CDEV_READ:
167   case CDEV_WRITE:
168 	gid = cpf_grant_magic(driver_e, user_e, buf,
169 		(size_t) bytes, op == CDEV_READ ? CPF_WRITE : CPF_READ);
170 	break;
171 
172   case CDEV_IOCTL:
173   case BDEV_IOCTL:
174 	/* For IOCTLs, the bytes parameter contains the IOCTL request.
175 	 * This request encodes the requested access method and buffer size.
176 	 */
177 	access = 0;
178 	if(_MINIX_IOCTL_IOR(bytes)) access |= CPF_WRITE;
179 	if(_MINIX_IOCTL_IOW(bytes)) access |= CPF_READ;
180 	if(_MINIX_IOCTL_BIG(bytes))
181 		size = _MINIX_IOCTL_SIZE_BIG(bytes);
182 	else
183 		size = _MINIX_IOCTL_SIZE(bytes);
184 
185 	/* Grant access to the buffer even if no I/O happens with the ioctl,
186 	 * although now that we no longer identify responses based on grants,
187 	 * this is not strictly necessary.
188 	 */
189 	gid = cpf_grant_magic(driver_e, user_e, buf, size, access);
190 	break;
191 
192   default:
193 	panic("VFS: unknown operation %d", op);
194   }
195 
196   if (!GRANT_VALID(gid))
197 	panic("VFS: cpf_grant_magic failed");
198 
199   return gid;
200 }
201 
/*===========================================================================*
 *				cdev_get				     *
 *===========================================================================*/
static struct dmap *cdev_get(dev_t dev, devminor_t *minor_dev)
{
/* Obtain the dmap structure for the given device, if a valid driver exists for
 * the major device. Perform redirection for CTTY_MAJOR.
 *
 * On success, the dmap entry is returned and the (possibly redirected) minor
 * number is stored in *minor_dev. NULL is returned when there is no
 * controlling tty for a CTTY request, the major number is out of range, or no
 * live driver is mapped for it.
 */
  devmajor_t major_dev;
  struct dmap *dp;
  int slot;

  /* First cover one special case: /dev/tty, the magic device that translates
   * to the controlling tty.
   */
  if (major(dev) == CTTY_MAJOR) {
	/* No controlling terminal? Fail the request. */
	if (fp->fp_tty == 0) return(NULL);

	/* Substitute the controlling terminal device. */
	dev = fp->fp_tty;
  }

  /* Determine task dmap. */
  major_dev = major(dev);
  if (major_dev < 0 || major_dev >= NR_DEVICES) return(NULL);

  dp = &dmap[major_dev];

  /* See if driver is roughly valid. */
  if (dp->dmap_driver == NONE) return(NULL);

  /* The mapped endpoint may be stale (e.g. after a driver crash); refuse to
   * hand out a dmap entry whose endpoint is no longer valid.
   */
  if (isokendpt(dp->dmap_driver, &slot) != OK) {
	printf("VFS: cdev_get: old driver for major %x (%d)\n", major_dev,
		dp->dmap_driver);
	return(NULL);
  }

  /* Also return the (possibly redirected) minor number. */
  *minor_dev = minor(dev);
  return dp;
}
244 
/*===========================================================================*
 *				cdev_io					     *
 *===========================================================================*/
int cdev_io(
  int op,			/* CDEV_READ, CDEV_WRITE, or CDEV_IOCTL */
  dev_t dev,			/* major-minor device number */
  endpoint_t proc_e,		/* in whose address space is buf? */
  vir_bytes buf,		/* virtual address of the buffer */
  off_t pos,			/* byte position */
  unsigned long bytes,		/* how many bytes to transfer, or request */
  int flags			/* special flags, like O_NONBLOCK */
)
{
/* Initiate a read, write, or ioctl to a character device.
 *
 * The request is sent to the driver asynchronously and the calling process is
 * suspended (SUSPEND is returned) until the driver's reply arrives; the grant
 * created here is stored in fp->fp_grant so it can be revoked when the
 * process is unsuspended.
 */
  devminor_t minor_dev;
  struct dmap *dp;
  message dev_mess;
  cp_grant_id_t gid;
  int r;

  assert(op == CDEV_READ || op == CDEV_WRITE || op == CDEV_IOCTL);

  /* Determine task map. */
  if ((dp = cdev_get(dev, &minor_dev)) == NULL)
	return(EIO);

  /* Handle TIOCSCTTY ioctl: set controlling tty.
   * TODO: cleaner implementation work in progress.
   */
  if (op == CDEV_IOCTL && bytes == TIOCSCTTY && major(dev) == TTY_MAJOR) {
       fp->fp_tty = dev;
  }

  /* Create a grant for the buffer provided by the user process. */
  gid = make_grant(dp->dmap_driver, proc_e, op, buf, bytes);

  /* Set up the rest of the message that will be sent to the driver.
   * For ioctls the request code and user endpoint are passed; for
   * reads/writes the file position and byte count are passed instead.
   */
  memset(&dev_mess, 0, sizeof(dev_mess));
  dev_mess.m_type = op;
  dev_mess.m_vfs_lchardriver_readwrite.minor = minor_dev;
  if (op == CDEV_IOCTL) {
	dev_mess.m_vfs_lchardriver_readwrite.request = bytes;
	dev_mess.m_vfs_lchardriver_readwrite.user = proc_e;
  } else {
	dev_mess.m_vfs_lchardriver_readwrite.pos = pos;
	dev_mess.m_vfs_lchardriver_readwrite.count = bytes;
  }
  /* The user endpoint doubles as the request identifier in the reply. */
  dev_mess.m_vfs_lchardriver_readwrite.id = proc_e;
  dev_mess.m_vfs_lchardriver_readwrite.grant = gid;
  dev_mess.m_vfs_lchardriver_readwrite.flags = 0;
  if (flags & O_NONBLOCK)
	  dev_mess.m_vfs_lchardriver_readwrite.flags |= CDEV_NONBLOCK;

  /* Send the request to the driver. */
  if ((r = asynsend3(dp->dmap_driver, &dev_mess, AMF_NOREPLY)) != OK)
	panic("VFS: asynsend in cdev_io failed: %d", r);

  /* Suspend the calling process until a reply arrives. */
  wait_for(dp->dmap_driver);
  assert(!GRANT_VALID(fp->fp_grant));
  fp->fp_grant = gid;	/* revoke this when unsuspended. */

  return SUSPEND;
}
309 
310 
311 /*===========================================================================*
312  *				cdev_clone				     *
313  *===========================================================================*/
314 static int cdev_clone(dev_t dev, devminor_t new_minor)
315 {
316 /* A new minor device number has been returned. Request PFS to create a
317  * temporary device file to hold it.
318  */
319   struct vnode *vp;
320   struct node_details res;
321   int r;
322 
323   /* Device number of the new device. */
324   dev = makedev(major(dev), new_minor);
325 
326   /* Issue request */
327   r = req_newnode(PFS_PROC_NR, fp->fp_effuid, fp->fp_effgid,
328       ALL_MODES | I_CHAR_SPECIAL, dev, &res);
329   if (r != OK) {
330 	(void) cdev_opcl(CDEV_CLOSE, dev, 0);
331 	return r;
332   }
333 
334   /* Drop old node and use the new values */
335   if ((vp = get_free_vnode()) == NULL) {
336 	req_putnode(PFS_PROC_NR, res.inode_nr, 1); /* is this right? */
337 	(void) cdev_opcl(CDEV_CLOSE, dev, 0);
338 	return(err_code);
339   }
340   lock_vnode(vp, VNODE_OPCL);
341 
342   assert(fp->fp_filp[scratch(fp).file.fd_nr] != NULL);
343   unlock_vnode(fp->fp_filp[scratch(fp).file.fd_nr]->filp_vno);
344   put_vnode(fp->fp_filp[scratch(fp).file.fd_nr]->filp_vno);
345 
346   vp->v_fs_e = res.fs_e;
347   vp->v_vmnt = NULL;
348   vp->v_dev = NO_DEV;
349   vp->v_fs_e = res.fs_e;
350   vp->v_inode_nr = res.inode_nr;
351   vp->v_mode = res.fmode;
352   vp->v_sdev = dev;
353   vp->v_fs_count = 1;
354   vp->v_ref_count = 1;
355   fp->fp_filp[scratch(fp).file.fd_nr]->filp_vno = vp;
356 
357   return OK;
358 }
359 
360 
/*===========================================================================*
 *				cdev_opcl				     *
 *===========================================================================*/
static int cdev_opcl(
  int op,			/* operation, CDEV_OPEN or CDEV_CLOSE */
  dev_t dev,			/* device to open or close */
  int flags			/* mode bits and flags */
)
{
/* Open or close a character device.
 *
 * The request is sent to the driver asynchronously; the calling worker thread
 * then blocks until the driver's reply has been copied into dev_mess by the
 * reply path (cdev_generic_reply). Open replies may carry CDEV_CLONED and
 * CDEV_CTTY status bits, which are handled here.
 */
  devminor_t minor_dev, new_minor;
  struct dmap *dp;
  struct fproc *rfp;
  message dev_mess;
  int r, r2;

  assert(op == CDEV_OPEN || op == CDEV_CLOSE);

  /* Determine task dmap. */
  if ((dp = cdev_get(dev, &minor_dev)) == NULL)
	return(ENXIO);

  /* CTTY exception: do not actually send the open/close request for /dev/tty
   * to the driver.  This avoids the case that the actual device will remain
   * open forever if the process calls setsid() after opening /dev/tty.
   */
  if (major(dev) == CTTY_MAJOR) return(OK);

  /* Add O_NOCTTY to the access flags if this process is not a session leader,
   * or if it already has a controlling tty, or if it is someone else's
   * controlling tty.  For performance reasons, only search the full process
   * table if this driver has set controlling ttys before.
   */
  if (!(fp->fp_flags & FP_SESLDR) || fp->fp_tty != 0) {
	flags |= O_NOCTTY;
  } else if (!(flags & O_NOCTTY) && dp->dmap_seen_tty) {
	for (rfp = &fproc[0]; rfp < &fproc[NR_PROCS]; rfp++)
		if (rfp->fp_pid != PID_FREE && rfp->fp_tty == dev)
			flags |= O_NOCTTY;
  }

  /* Prepare the request message. */
  memset(&dev_mess, 0, sizeof(dev_mess));

  dev_mess.m_type = op;
  dev_mess.m_vfs_lchardriver_openclose.minor = minor_dev;
  /* The caller endpoint identifies this request in the driver's reply. */
  dev_mess.m_vfs_lchardriver_openclose.id = who_e;
  if (op == CDEV_OPEN) {
	dev_mess.m_vfs_lchardriver_openclose.user = who_e;
	dev_mess.m_vfs_lchardriver_openclose.access = 0;
	if (flags & R_BIT)
		dev_mess.m_vfs_lchardriver_openclose.access |= CDEV_R_BIT;
	if (flags & W_BIT)
		dev_mess.m_vfs_lchardriver_openclose.access |= CDEV_W_BIT;
	if (flags & O_NOCTTY)
		dev_mess.m_vfs_lchardriver_openclose.access |= CDEV_NOCTTY;
  }

  /* Send the request to the driver. */
  if ((r = asynsend3(dp->dmap_driver, &dev_mess, AMF_NOREPLY)) != OK)
	panic("VFS: asynsend in cdev_opcl failed: %d", r);

  /* Block the thread waiting for a reply.  The reply path copies the driver's
   * reply into dev_mess through w_drv_sendrec and signals the worker.
   */
  fp->fp_task = dp->dmap_driver;
  self->w_task = dp->dmap_driver;
  self->w_drv_sendrec = &dev_mess;

  worker_wait();

  self->w_task = NONE;
  self->w_drv_sendrec = NULL;

  /* Process the reply. */
  r = dev_mess.m_lchardriver_vfs_reply.status;

  if (op == CDEV_OPEN && r >= 0) {
	/* Some devices need special processing upon open. Such a device is
	 * "cloned", i.e. on a succesful open it is replaced by a new device
	 * with a new unique minor device number. This new device number
	 * identifies a new object (such as a new network connection) that has
	 * been allocated within a driver.
	 */
	if (r & CDEV_CLONED) {
		new_minor = r & ~(CDEV_CLONED | CDEV_CTTY);
		if ((r2 = cdev_clone(dev, new_minor)) < 0)
			return(r2);
	}

	/* Did this call make the tty the controlling tty? */
	if (r & CDEV_CTTY) {
		fp->fp_tty = dev;
		dp->dmap_seen_tty = TRUE;
	}

	r = OK;
  }

  /* Return the result from the driver. */
  return(r);
}
461 
462 
/*===========================================================================*
 *				cdev_open				     *
 *===========================================================================*/
int cdev_open(dev_t dev, int flags)
{
/* Open a character device.  Thin wrapper around cdev_opcl; see there for the
 * flag handling and reply processing.
 */

  return cdev_opcl(CDEV_OPEN, dev, flags);
}
472 
473 
/*===========================================================================*
 *				cdev_close				     *
 *===========================================================================*/
int cdev_close(dev_t dev)
{
/* Close a character device.  Thin wrapper around cdev_opcl; no mode flags
 * apply to a close request.
 */

  return cdev_opcl(CDEV_CLOSE, dev, 0);
}
483 
484 
/*===========================================================================*
 *				do_ioctl				     *
 *===========================================================================*/
int do_ioctl(void)
{
/* Perform the ioctl(2) system call.
 *
 * The target file must be a character or block special file; the request is
 * forwarded to the corresponding driver through cdev_io or bdev_ioctl.
 */
  unsigned long ioctlrequest;
  int r = OK;
  struct filp *f;
  register struct vnode *vp;
  dev_t dev;
  vir_bytes argx;

  /* Fetch the call arguments from the request message. */
  scratch(fp).file.fd_nr = job_m_in.m_lc_vfs_ioctl.fd;
  ioctlrequest = job_m_in.m_lc_vfs_ioctl.req;
  argx = (vir_bytes)job_m_in.m_lc_vfs_ioctl.arg;

  if ((f = get_filp(scratch(fp).file.fd_nr, VNODE_READ)) == NULL)
	return(err_code);
  vp = f->filp_vno;		/* get vnode pointer */
  if (!S_ISCHR(vp->v_mode) && !S_ISBLK(vp->v_mode)) {
	r = ENOTTY;
  }

  if (r == OK) {
	dev = vp->v_sdev;

	if (S_ISBLK(vp->v_mode)) {
		/* Record which process is in a block ioctl on this filp, so
		 * the request can be associated with it while it is ongoing.
		 */
		f->filp_ioctl_fp = fp;

		r = bdev_ioctl(dev, who_e, ioctlrequest, argx);

		f->filp_ioctl_fp = NULL;
	} else
		/* Character ioctl: may return SUSPEND to block the caller. */
		r = cdev_io(CDEV_IOCTL, dev, who_e, argx, 0, ioctlrequest,
			f->filp_flags);
  }

  unlock_filp(f);

  return(r);
}
527 
528 
529 /*===========================================================================*
530  *				cdev_select				     *
531  *===========================================================================*/
532 int cdev_select(dev_t dev, int ops)
533 {
534 /* Initiate a select call on a device. Return OK iff the request was sent. */
535   devminor_t minor_dev;
536   message dev_mess;
537   struct dmap *dp;
538   int r;
539 
540   /* Determine task dmap. */
541   if ((dp = cdev_get(dev, &minor_dev)) == NULL)
542 	return(EIO);
543 
544   /* Prepare the request message. */
545   memset(&dev_mess, 0, sizeof(dev_mess));
546 
547   dev_mess.m_type = CDEV_SELECT;
548   dev_mess.m_vfs_lchardriver_select.minor = minor_dev;
549   dev_mess.m_vfs_lchardriver_select.ops = ops;
550 
551   /* Send the request to the driver. */
552   if ((r = asynsend3(dp->dmap_driver, &dev_mess, AMF_NOREPLY)) != OK)
553 	panic("VFS: asynsend in cdev_select failed: %d", r);
554 
555   return(OK);
556 }
557 
558 
/*===========================================================================*
 *				cdev_cancel				     *
 *===========================================================================*/
int cdev_cancel(dev_t dev)
{
/* Cancel an I/O request, blocking until it has been cancelled.
 *
 * A CDEV_CANCEL message is sent to the driver and the worker thread waits for
 * the reply, which may also be the (late) completion of the original request.
 * Any grant left over from the original request is revoked here.
 */
  devminor_t minor_dev;
  message dev_mess;
  struct dmap *dp;
  int r;

  /* Determine task dmap. */
  if ((dp = cdev_get(dev, &minor_dev)) == NULL)
	return(EIO);

  /* Prepare the request message. */
  memset(&dev_mess, 0, sizeof(dev_mess));

  dev_mess.m_type = CDEV_CANCEL;
  dev_mess.m_vfs_lchardriver_cancel.minor = minor_dev;
  /* Identify the request being cancelled by the endpoint that issued it. */
  dev_mess.m_vfs_lchardriver_cancel.id = fp->fp_endpoint;

  /* Send the request to the driver. */
  if ((r = asynsend3(dp->dmap_driver, &dev_mess, AMF_NOREPLY)) != OK)
	panic("VFS: asynsend in cdev_cancel failed: %d", r);

  /* Suspend this thread until we have received the response. */
  fp->fp_task = dp->dmap_driver;
  self->w_task = dp->dmap_driver;
  self->w_drv_sendrec = &dev_mess;

  worker_wait();

  self->w_task = NONE;
  self->w_drv_sendrec = NULL;

  /* Clean up and return the result (note: the request may have completed). */
  if (GRANT_VALID(fp->fp_grant)) {
	(void) cpf_revoke(fp->fp_grant);
	fp->fp_grant = GRANT_INVALID;
  }

  /* EAGAIN from the driver means "nothing to cancel"; map it to EINTR for
   * the interrupted caller.
   */
  r = dev_mess.m_lchardriver_vfs_reply.status;
  return (r == EAGAIN) ? EINTR : r;
}
604 
605 
606 /*===========================================================================*
607  *				block_io				     *
608  *===========================================================================*/
609 static int block_io(endpoint_t driver_e, message *mess_ptr)
610 {
611 /* Perform I/O on a block device. The current thread is suspended until a reply
612  * comes in from the driver.
613  */
614   int r, status, retry_count;
615   message mess_retry;
616 
617   assert(IS_BDEV_RQ(mess_ptr->m_type));
618   mess_retry = *mess_ptr;
619   retry_count = 0;
620 
621   do {
622 	r = drv_sendrec(driver_e, mess_ptr);
623 	if (r != OK)
624 		return r;
625 
626 	status = mess_ptr->m_lblockdriver_lbdev_reply.status;
627 	if (status == ERESTART) {
628 		r = EDEADEPT;
629 		*mess_ptr = mess_retry;
630 		retry_count++;
631 	}
632   } while (status == ERESTART && retry_count < 5);
633 
634   /* If we failed to restart the request, return EIO */
635   if (status == ERESTART && retry_count >= 5)
636 	return EIO;
637 
638   if (r != OK) {
639 	if (r == EDEADSRCDST || r == EDEADEPT) {
640 		printf("VFS: dead driver %d\n", driver_e);
641 		dmap_unmap_by_endpt(driver_e);
642 		return(EIO);
643 	} else if (r == ELOCKED) {
644 		printf("VFS: ELOCKED talking to %d\n", driver_e);
645 		return(EIO);
646 	}
647 	panic("block_io: can't send/receive: %d", r);
648   }
649 
650   return(OK);
651 }
652 
653 
/*===========================================================================*
 *				bdev_up					     *
 *===========================================================================*/
void bdev_up(devmajor_t maj)
{
  /* A new block device driver has been mapped in. This may affect both mounted
   * file systems and open block-special files.
   *
   * All open block-special filps on this major are reopened on the new
   * driver, and every affected (mounted) file system is told the new driver
   * label so it can re-establish contact.
   */
  int r, found, bits;
  struct filp *rfilp;
  struct vmnt *vmp;
  struct vnode *vp;
  char *label;

  if (maj < 0 || maj >= NR_DEVICES) panic("VFS: out-of-bound major");
  label = dmap[maj].dmap_label;
  found = 0;

  /* For each block-special file that was previously opened on the affected
   * device, we need to reopen it on the new driver.
   */
  for (rfilp = filp; rfilp < &filp[NR_FILPS]; rfilp++) {
	if (rfilp->filp_count < 1 || !(vp = rfilp->filp_vno)) continue;
	if (major(vp->v_sdev) != maj) continue;
	if (!S_ISBLK(vp->v_mode)) continue;

	/* Reopen the device on the driver, once per filp. */
	bits = rfilp->filp_mode & (R_BIT|W_BIT);
	if ((r = bdev_open(vp->v_sdev, bits)) != OK) {
		printf("VFS: mounted dev %d/%d re-open failed: %d.\n",
			maj, minor(vp->v_sdev), r);
		dmap[maj].dmap_recovering = 0;
		return; /* Give up entirely */
	}

	found = 1;
  }

  /* Tell each affected mounted file system about the new endpoint.
   */
  for (vmp = &vmnt[0]; vmp < &vmnt[NR_MNTS]; ++vmp) {
	if (major(vmp->m_dev) != maj) continue;

	/* Send the driver label to the mounted file system. */
	if (OK != req_newdriver(vmp->m_fs_e, vmp->m_dev, label))
		printf("VFS dev_up: error sending new driver label to %d\n",
		       vmp->m_fs_e);
  }

  /* If any block-special file was open for this major at all, also inform the
   * root file system about the new driver. We do this even if the
   * block-special file is linked to another mounted file system, merely
   * because it is more work to check for that case.
   */
  if (found) {
	if (OK != req_newdriver(ROOT_FS_E, makedev(maj, 0), label))
		printf("VFSdev_up: error sending new driver label to %d\n",
			ROOT_FS_E);
  }
}
714 
715 
/*===========================================================================*
 *				cdev_generic_reply			     *
 *===========================================================================*/
static void cdev_generic_reply(message *m_ptr)
{
/* A character driver has results for an open, close, read, write, or ioctl
 * call (i.e., everything except select). There may be a thread waiting for
 * these results as part of an ongoing open, close, or (for read/write/ioctl)
 * cancel call. If so, wake up that thread; if not, send a reply to the
 * requesting process. This function MUST NOT block its calling thread.
 */
  struct fproc *rfp;
  struct worker_thread *wp;
  endpoint_t proc_e;
  int slot;

  /* The reply identifies the original requester by endpoint. */
  proc_e = m_ptr->m_lchardriver_vfs_reply.id;

  if (m_ptr->m_lchardriver_vfs_reply.status == SUSPEND) {
	/* Drivers must not reply with SUSPEND; ignore such replies. */
	printf("VFS: got SUSPEND from %d, not reviving\n", m_ptr->m_source);
	return;
  }

  if (isokendpt(proc_e, &slot) != OK) {
	printf("VFS: proc %d from %d not found\n", proc_e, m_ptr->m_source);
	return;
  }
  rfp = &fproc[slot];
  wp = rfp->fp_worker;
  if (wp != NULL && wp->w_task == who_e) {
	/* A worker thread is blocked on this driver: hand it the reply. */
	assert(!fp_is_blocked(rfp));
	*wp->w_drv_sendrec = *m_ptr;
	worker_signal(wp);	/* Continue open/close/cancel */
  } else if (rfp->fp_blocked_on != FP_BLOCKED_ON_OTHER ||
		rfp->fp_task != m_ptr->m_source) {
	/* This would typically be caused by a protocol error, i.e. a driver
	 * not properly following the character driver protocol rules.
	 */
	printf("VFS: proc %d not blocked on %d\n", proc_e, m_ptr->m_source);
  } else {
	/* The process is suspended on this driver: revive it directly. */
	revive(proc_e, m_ptr->m_lchardriver_vfs_reply.status);
  }
}
759 
760 
/*===========================================================================*
 *			       cdev_reply				     *
 *===========================================================================*/
void cdev_reply(void)
{
/* A character driver has results for us.  Dispatch on the reply type:
 * generic replies (open/close/read/write/ioctl) versus the two select reply
 * variants.  Replies from endpoints that are not known drivers are dropped.
 */

  if (get_dmap(who_e) == NULL) {
	printf("VFS: ignoring char dev reply from unknown driver %d\n", who_e);
	return;
  }

  switch (call_nr) {
  case CDEV_REPLY:
	cdev_generic_reply(&m_in);
	break;
  case CDEV_SEL1_REPLY:
	/* Initial select reply: immediate status for the select request. */
	select_reply1(m_in.m_source, m_in.m_lchardriver_vfs_sel1.minor,
		m_in.m_lchardriver_vfs_sel1.status);
	break;
  case CDEV_SEL2_REPLY:
	/* Deferred select reply: operations became ready later. */
	select_reply2(m_in.m_source, m_in.m_lchardriver_vfs_sel2.minor,
		m_in.m_lchardriver_vfs_sel2.status);
	break;
  default:
	printf("VFS: char driver %u sent unknown reply %x\n", who_e, call_nr);
  }
}
789 
790 
/*===========================================================================*
 *				bdev_reply				     *
 *===========================================================================*/
void bdev_reply(void)
{
/* A block driver has results for a call. There must be a thread waiting for
 * these results - wake it up. This function MUST NOT block its calling thread.
 */
  struct worker_thread *wp;
  struct dmap *dp;

  /* Only accept replies from known, mapped drivers. */
  if ((dp = get_dmap(who_e)) == NULL) {
	printf("VFS: ignoring block dev reply from unknown driver %d\n",
		who_e);
	return;
  }

  /* A reply is only expected while a worker thread is servicing the driver. */
  if (dp->dmap_servicing == INVALID_THREAD) {
	printf("VFS: ignoring spurious block dev reply from %d\n", who_e);
	return;
  }

  wp = worker_get(dp->dmap_servicing);
  if (wp == NULL || wp->w_task != who_e) {
	printf("VFS: no worker thread waiting for a reply from %d\n", who_e);
	return;
  }

  /* Hand the reply message to the waiting thread and wake it up. */
  assert(wp->w_drv_sendrec != NULL);
  *wp->w_drv_sendrec = m_in;
  wp->w_drv_sendrec = NULL;
  worker_signal(wp);
}
824