xref: /dragonfly/sys/kern/kern_physio.c (revision ae24b5e0)
/*
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Absolutely no warranty of function or purpose is made by the author
 *    John S. Dyson.
 * 4. Modifications may be freely made to this file if the above conditions
 *    are met.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/proc.h>
#include <sys/uio.h>
#include <sys/device.h>
#include <sys/thread2.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>

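/*
 * physio() - transfer data directly between a device and the buffer
 * described by the uio, bypassing the buffer cache.  A pbuf is borrowed
 * for the duration of each chunk: for userspace requests its backing
 * memory is used as a bounce buffer, for system-space requests the
 * caller's buffer is used directly.
 */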
static int
physio(cdev_t dev, struct uio *uio, int ioflag)
{
	int i;
	int error;
	int saflags;
	int iolen;
	int bcount;
	caddr_t ubase;
	struct buf *bp;

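	/*
	 * Userspace requests are bounced through the pbuf's backing
	 * memory (copyin/copyout below), so allocate a pbuf with
	 * associated memory.  Kernel-space requests point b_data at
	 * the caller's buffer directly, so a pbuf without backing
	 * memory suffices.
	 */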
	if (uio->uio_segflg == UIO_USERSPACE)
		bp = getpbuf_mem(NULL);
	else
		bp = getpbuf_kva(NULL);
	saflags = bp->b_flags;
	error = 0;

	/* XXX: sanity check */
	if (dev->si_iosize_max < PAGE_SIZE) {
		kprintf("WARNING: %s si_iosize_max=%d, using MAXPHYS.\n",
		    devtoname(dev), dev->si_iosize_max);
		dev->si_iosize_max = MAXPHYS;
	}

	/* Must be a real uio */
	KKASSERT(uio->uio_segflg != UIO_NOCOPY);

	for (i = 0; i < uio->uio_iovcnt; i++) {
		while (uio->uio_iov[i].iov_len) {
			if (uio->uio_rw == UIO_READ)
				bp->b_cmd = BUF_CMD_READ;
			else
				bp->b_cmd = BUF_CMD_WRITE;
			bp->b_flags = saflags;
			bcount = uio->uio_iov[i].iov_len;

			reinitbufbio(bp);	/* clear translation cache */
			bp->b_bio1.bio_offset = uio->uio_offset;
			bp->b_bio1.bio_done = biodone_sync;
			bp->b_bio1.bio_flags |= BIO_SYNC;

			/*
			 * Setup for mapping the request into kernel memory.
			 *
			 * We can only transfer as much as fits in a pbuf,
			 * which is MAXPHYS, and no larger than the device
			 * can handle.
			 *
			 * If not using bounce pages the base address of the
			 * user mapping into the pbuf may be offset, further
			 * reducing how much will actually fit in the pbuf.
			 */
			if (bcount > dev->si_iosize_max)
				bcount = dev->si_iosize_max;

			ubase = uio->uio_iov[i].iov_base;
			iolen = ((vm_offset_t)ubase) & PAGE_MASK;
			if (bcount > bp->b_kvasize)
				bcount = bp->b_kvasize;

			/*
			 * If we have to use a bounce buffer allocate kernel
			 * memory and copyin/copyout.  Otherwise map the
			 * user buffer directly into kernel memory without
			 * copying.
			 */
			if (uio->uio_segflg == UIO_USERSPACE) {
				/* bp->b_data = bp->b_kvabase; */
				bp->b_bcount = bcount;
				/*
				vm_hold_load_pages(bp, (vm_offset_t)bp->b_data, (vm_offset_t)bp->b_data + bcount);
				*/
				if (uio->uio_rw == UIO_WRITE) {
					error = copyin(ubase, bp->b_data, bcount);
					if (error) {
						/*
						vm_hold_free_pages(bp, (vm_offset_t)bp->b_data, (vm_offset_t)bp->b_data + bcount);
						*/
						goto doerror;
					}
				}
			} else {
				bp->b_data = uio->uio_iov[i].iov_base;
				bp->b_bcount = bcount;
			}
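			/*
			 * Issue the request to the device's strategy routine
			 * and wait for the synchronous BIO to complete.
			 */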
			dev_dstrategy(dev, &bp->b_bio1);
			biowait(&bp->b_bio1, "physstr");

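			/*
			 * iolen is the number of bytes actually transferred
			 * (the requested count less the residual).  For
			 * userspace reads, copy the result back to the caller.
			 */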
			iolen = bp->b_bcount - bp->b_resid;
			if (uio->uio_segflg == UIO_USERSPACE) {
				if (uio->uio_rw == UIO_READ && iolen) {
					error = copyout(bp->b_data, ubase, iolen);
					if (error) {
						bp->b_flags |= B_ERROR;
						bp->b_error = error;
					}
				}
			}
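			/*
			 * A zero-length transfer without an error means EOF.
			 * Otherwise advance the iovec and uio by the amount
			 * transferred, then check for a device error.
			 */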
			if (iolen == 0 && !(bp->b_flags & B_ERROR))
				goto doerror;	/* EOF */
			uio->uio_iov[i].iov_len -= iolen;
			uio->uio_iov[i].iov_base = (char *)uio->uio_iov[i].iov_base + iolen;
			uio->uio_resid -= iolen;
			uio->uio_offset += iolen;
			if (bp->b_flags & B_ERROR) {
				error = bp->b_error;
				goto doerror;
			}
		}
	}
doerror:
	relpbuf(bp, NULL);
	return (error);
}

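/*
 * Device read/write entry points.  Both hand the request straight to
 * physio(); the transfer direction is taken from the uio.
 */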
int
physread(struct dev_read_args *ap)
{
	return(physio(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}

int
physwrite(struct dev_write_args *ap)
{
	return(physio(ap->a_head.a_dev, ap->a_uio, ap->a_ioflag));
}