xref: /original-bsd/sys/kern/subr_xxx.c (revision fbed46ce)
1 /*	subr_xxx.c	4.10	82/04/19	*/
2 
3 /* merged into kernel:	@(#)subr.c 2.2 4/8/82 */
4 
5 #include "../h/param.h"
6 #include "../h/systm.h"
7 #include "../h/conf.h"
8 #include "../h/inode.h"
9 #include "../h/dir.h"
10 #include "../h/user.h"
11 #include "../h/buf.h"
12 #include "../h/proc.h"
13 #include "../h/fs.h"
14 
15 /*
16  * Bmap defines the structure of file system storage
17  * by returning the physical block number on a device given the
18  * inode and the logical block number in a file.
19  * When convenient, it also leaves the physical
20  * block number of the next block of the file in rablock
21  * for use in read-ahead.
22  */
23 /*VARARGS3*/
24 daddr_t
25 bmap(ip, bn, rwflg, size)
26 	register struct inode *ip;
27 	daddr_t bn;
28 	int rwflg;
29 	int size;	/* supplied only when rwflg == B_WRITE */
30 {
31 	register int i;
32 	int osize, nsize;
33 	struct buf *bp, *nbp;
34 	struct fs *fs;
35 	int j, sh;
36 	daddr_t nb, *bap, pref, blkpref();
37 
38 	if (bn < 0) {
39 		u.u_error = EFBIG;
40 		return ((daddr_t)0);
41 	}
42 	fs = ip->i_fs;
43 	rablock = 0;
44 
45 	/*
46 	 * If the next write will extend the file into a new block,
47 	 * and the file is currently composed of a fragment
48 	 * this fragment has to be extended to be a full block.
49 	 */
50 	nb = lblkno(fs, ip->i_size);
51 	if (rwflg == B_WRITE && nb < NDADDR && nb < bn) {
52 		osize = blksize(fs, ip, nb);
53 		if (osize < fs->fs_bsize && osize > 0) {
54 			bp = realloccg(ip, ip->i_db[nb],
55 				nb == 0 ? 0 : ip->i_db[nb - 1] + fs->fs_frag,
56 				osize, fs->fs_bsize);
57 			ip->i_size = (nb + 1) * fs->fs_bsize;
58 			ip->i_db[nb] = dbtofsb(fs, bp->b_blkno);
59 			ip->i_flag |= IUPD|ICHG;
60 			bdwrite(bp);
61 		}
62 	}
63 	/*
64 	 * The first NDADDR blocks are direct blocks
65 	 */
66 	if (bn < NDADDR) {
67 		i = bn;
68 		nb = ip->i_db[i];
69 		if (rwflg == B_READ) {
70 			if (nb == 0)
71 				return ((daddr_t)-1);
72 			goto gotit;
73 		}
74 		if (nb == 0 || ip->i_size < (i + 1) * fs->fs_bsize) {
75 			if (nb != 0) {
76 				/* consider need to reallocate a frag */
77 				osize = fragroundup(fs, blkoff(fs, ip->i_size));
78 				nsize = fragroundup(fs, size);
79 				if (nsize <= osize)
80 					goto gotit;
81 				bp = realloccg(ip, nb, i == 0 ?
82 					0 : ip->i_db[i - 1] + fs->fs_frag,
83 					osize, nsize);
84 			} else {
85 				if (ip->i_size < (i + 1) * fs->fs_bsize)
86 					nsize = fragroundup(fs, size);
87 				else
88 					nsize = fs->fs_bsize;
89 				bp = alloc(ip, i > 0 ?
90 					ip->i_db[i - 1] + fs->fs_frag : 0,
91 					nsize);
92 			}
93 			if (bp == NULL)
94 				return ((daddr_t)-1);
95 			nb = dbtofsb(fs, bp->b_blkno);
96 			if ((ip->i_mode&IFMT) == IFDIR)
97 				/*
98 				 * Write directory blocks synchronously
99 				 * so they never appear with garbage in
100 				 * them on the disk.
101 				 */
102 				bwrite(bp);
103 			else
104 				bdwrite(bp);
105 			ip->i_db[i] = nb;
106 			ip->i_flag |= IUPD|ICHG;
107 		}
108 gotit:
109 		if (i < NDADDR - 1)
110 			rablock = ip->i_db[i+1];
111 		return (nb);
112 	}
113 
114 	/*
115 	 * Determine how many levels of indirection.
116 	 */
117 	sh = 1;
118 	bn -= NDADDR;
119 	for (j = NIADDR; j>0; j--) {
120 		sh *= NINDIR(fs);
121 		if (bn < sh)
122 			break;
123 		bn -= sh;
124 	}
125 	if (j == 0) {
126 		u.u_error = EFBIG;
127 		return ((daddr_t)0);
128 	}
129 
130 	/*
131 	 * fetch the first indirect block
132 	 */
133 	nb = ip->i_ib[NIADDR - j];
134 	if (nb == 0) {
135 		if (rwflg==B_READ ||
136 		    (bp = alloc(ip, (daddr_t)0, fs->fs_bsize)) == NULL)
137 			return ((daddr_t)-1);
138 		nb = dbtofsb(fs, bp->b_blkno);
139 		/*
140 		 * Write synchronously so that indirect blocks
141 		 * never point at garbage.
142 		 */
143 		bwrite(bp);
144 		ip->i_ib[NIADDR - j] = nb;
145 		ip->i_flag |= IUPD|ICHG;
146 	}
147 
148 	/*
149 	 * fetch through the indirect blocks
150 	 */
151 	for (; j <= NIADDR; j++) {
152 		bp = bread(ip->i_dev, fsbtodb(fs, nb), fs->fs_bsize);
153 		if (bp->b_flags & B_ERROR) {
154 			brelse(bp);
155 			return ((daddr_t)0);
156 		}
157 		bap = bp->b_un.b_daddr;
158 		sh /= NINDIR(fs);
159 		i = (bn / sh) % NINDIR(fs);
160 		nb = bap[i];
161 		if (nb == 0) {
162 			if (rwflg==B_READ) {
163 				brelse(bp);
164 				return ((daddr_t)-1);
165 			}
166 			if (i % (fs->fs_fsize / sizeof(daddr_t)) == 0 ||
167 			    bap[i - 1] == 0)
168 				pref = blkpref(ip->i_fs);
169 			else
170 				pref = bap[i - 1] + fs->fs_frag;
171 		        nbp = alloc(ip, pref, fs->fs_bsize);
172 			if (nbp == NULL) {
173 				brelse(bp);
174 				return ((daddr_t)-1);
175 			}
176 			nb = dbtofsb(fs, nbp->b_blkno);
177 			if (j < NIADDR || (ip->i_mode&IFMT) == IFDIR)
178 				/*
179 				 * Write synchronously so indirect blocks
180 				 * never point at garbage and blocks
181 				 * in directories never contain garbage.
182 				 */
183 				bwrite(nbp);
184 			else
185 				bdwrite(nbp);
186 			bap[i] = nb;
187 			bdwrite(bp);
188 		} else
189 			brelse(bp);
190 	}
191 
192 	/*
193 	 * calculate read-ahead.
194 	 */
195 	if (i < NINDIR(fs) - 1)
196 		rablock = bap[i+1];
197 	return (nb);
198 }
199 
200 /*
201  * Pass back  c  to the user at his location u_base;
202  * update u_base, u_count, and u_offset.  Return -1
203  * on the last character of the user's read.
204  * u_base is in the user address space unless u_segflg is set.
205  */
206 passc(c)
207 register c;
208 {
209 	register id;
210 
211 	if ((id = u.u_segflg) == 1)
212 		*u.u_base = c;
213 	else
214 		if (id?suibyte(u.u_base, c):subyte(u.u_base, c) < 0) {
215 			u.u_error = EFAULT;
216 			return (-1);
217 		}
218 	u.u_count--;
219 	u.u_offset++;
220 	u.u_base++;
221 	return (u.u_count == 0? -1: 0);
222 }
223 
224 #include "ct.h"
225 #if NCT > 0
226 /*
227  * Pick up and return the next character from the user's
228  * write call at location u_base;
229  * update u_base, u_count, and u_offset.  Return -1
230  * when u_count is exhausted.  u_base is in the user's
231  * address space unless u_segflg is set.
232  */
233 cpass()
234 {
235 	register c, id;
236 
237 	if (u.u_count == 0)
238 		return (-1);
239 	if ((id = u.u_segflg) == 1)
240 		c = *u.u_base;
241 	else
242 		if ((c = id==0?fubyte(u.u_base):fuibyte(u.u_base)) < 0) {
243 			u.u_error = EFAULT;
244 			return (-1);
245 		}
246 	u.u_count--;
247 	u.u_offset++;
248 	u.u_base++;
249 	return (c&0377);
250 }
251 #endif
252 
/*
 * Routine which sets a user error; placed in
 * illegal entries in the bdevsw and cdevsw tables.
 * Returning ENODEV tells the caller the operation is
 * not supported on this device.
 */
nodev()
{

	u.u_error = ENODEV;
}
262 
/*
 * Null routine; placed in insignificant entries
 * in the bdevsw and cdevsw tables.
 * Deliberately does nothing: the operation is allowed
 * but requires no work for this device.
 */
nulldev()
{

}
271 
/*
 * Return the smaller of two (signed) integers.
 */
imin(a, b)
{
	register int result;

	result = b;
	if (a < b)
		result = a;
	return (result);
}
277 
/*
 * Return the larger of two (signed) integers.
 */
imax(a, b)
{
	register int result;

	result = b;
	if (a > b)
		result = a;
	return (result);
}
283 
/*
 * Return the smaller of two unsigned integers.
 */
unsigned
min(a, b)
	unsigned int a, b;
{

	if (a > b)
		return (b);
	return (a);
}
291 
/*
 * Return the larger of two unsigned integers.
 */
unsigned
max(a, b)
	unsigned int a, b;
{

	if (a < b)
		return (b);
	return (a);
}
299 
300 struct proc *
301 pfind(pid)
302 	int pid;
303 {
304 	register struct proc *p;
305 
306 	for (p = &proc[pidhash[PIDHASH(pid)]]; p != &proc[0]; p = &proc[p->p_idhash])
307 		if (p->p_pid == pid)
308 			return (p);
309 	return ((struct proc *)0);
310 }
311