xref: /original-bsd/usr.sbin/amd/amd/nfs_start.c (revision 95a66346)
/*
 * $Id: nfs_start.c,v 5.2.1.2 90/12/21 16:41:40 jsp Alpha $
 *
 * Copyright (c) 1990 Jan-Simon Pendry
 * Copyright (c) 1990 Imperial College of Science, Technology & Medicine
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry at Imperial College, London.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)nfs_start.c	5.2 (Berkeley) 03/17/91
 */

#include "am.h"
#include "amq.h"
#include <sys/signal.h>
#include <setjmp.h>
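/*
 * select_intr is the longjmp target used to break out of select();
 * select_intr_valid is non-zero only while do_select() below is
 * prepared to take that longjmp from the signal handling code.
 */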
extern jmp_buf select_intr;
extern int select_intr_valid;

#ifdef HAS_TFS
/*
 * Use replacement for RPC/UDP transport
 * so that we do NFS gatewaying.
 */
#define	svcudp_create svcudp2_create
extern SVCXPRT *svcudp2_create P((int));
#endif /* HAS_TFS */

extern void nfs_program_2();
extern void amq_program_1();

unsigned short nfs_port;
SVCXPRT *nfsxprt;

extern int fwd_sock;
int max_fds = -1;

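/*
 * Signals which are kept blocked except while the main loop
 * is waiting in select(); see do_select() below.
 */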
#define	MASKED_SIGS	(sigmask(SIGINT)|sigmask(SIGTERM)|sigmask(SIGCHLD)|sigmask(SIGHUP))

#ifdef DEBUG
/*
 * Check that we are not burning resources
 */
static void checkup(P_void)
{
	static int max_fd = 0;
	static char *max_mem = 0;

	int next_fd = dup(0);
	extern caddr_t sbrk P((int));
	caddr_t next_mem = sbrk(0);
	close(next_fd);

	/*if (max_fd < 0) {
		max_fd = next_fd;
	} else*/ if (max_fd < next_fd) {
		dlog("%d new fds allocated; total is %d",
			next_fd - max_fd, next_fd);
		max_fd = next_fd;
	}

	/*if (max_mem == 0) {
		max_mem = next_mem;
	} else*/ if (max_mem < next_mem) {
		dlog("%#x bytes of memory allocated; total is %#x (%d pages)",
			next_mem - max_mem,
			next_mem,
			((int)next_mem+getpagesize()-1)/getpagesize());
		max_mem = next_mem;
	}
}
#endif /* DEBUG */

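/*
 * Wrapper for select().  Signals are unblocked only for the
 * duration of the select() call; if one of MASKED_SIGS arrives,
 * its handler longjmps back to the setjmp below and the call is
 * made to look like an interrupted system call.  The map cache
 * is also reloaded from here once its hourly timer has expired.
 */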
static int do_select(smask, fds, fdp, tvp)
int smask;
int fds;
int *fdp;
struct timeval *tvp;
{
	int sig;
	int nsel;
	if (sig = setjmp(select_intr)) {
		select_intr_valid = 0;
		/* Got a signal */
		switch (sig) {
		case SIGINT:
		case SIGTERM:
			amd_state = Finishing;
			reschedule_timeout_mp();
			break;
		}
		nsel = -1;
		errno = EINTR;
	} else {
		select_intr_valid = 1;
		/*
		 * Invalidate the current clock value
		 */
		clock_valid = 0;
		/*
		 * Allow interrupts.  If a signal
		 * occurs, then it will cause a longjmp
		 * up above.
		 */
		(void) sigsetmask(smask);
		/*
		 * Wait for input
		 */
		nsel = select(fds, fdp, (int *) 0, (int *) 0,
				tvp->tv_sec ? tvp : (struct timeval *) 0);

	}

	(void) sigblock(MASKED_SIGS);

	/*
	 * Perhaps reload the cache?
	 */
	if (do_mapc_reload < clocktime()) {
		mapc_reload();
		do_mapc_reload = clocktime() + ONE_HOUR;
	}
	return nsel;
}

/*
 * Determine whether anything is left in
 * the RPC input queue.
 */
static int rpc_pending_now()
{
	struct timeval tvv;
	int nsel;
#ifdef FD_SET
	fd_set readfds;

	FD_ZERO(&readfds);
	FD_SET(fwd_sock, &readfds);
#else
	int readfds = (1 << fwd_sock);
#endif /* FD_SET */

	tvv.tv_sec = tvv.tv_usec = 0;
	nsel = select(max_fds+1, &readfds, (int *) 0, (int *) 0, &tvv);
	if (nsel < 1)
		return(0);
#ifdef FD_SET
	if (FD_ISSET(fwd_sock, &readfds))
		return(1);
#else
	if (readfds & (1 << fwd_sock))
		return(1);
#endif
	return(0);
}

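/*
 * Main dispatch loop: block the interesting signals, then
 * repeatedly select on the RPC service descriptors and the NFS
 * forwarding socket, dispatching replies and requests until
 * amd_state has moved past Finishing and everything has been
 * unmounted.
 */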
static serv_state run_rpc(P_void)
{
	int dtbsz = max_fds + 1;
	int smask = sigblock(MASKED_SIGS);

	next_softclock = clocktime();

	amd_state = Run;

	/*
	 * Keep on trucking while we are in Run mode.  This state
	 * is switched to Quit after all the file systems have
	 * been unmounted.
	 */
	while ((int)amd_state <= (int)Finishing) {
		struct timeval tvv;
		int nsel;
		time_t now;
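		/*
		 * Wait on the RPC service descriptors plus the
		 * NFS forwarding socket.
		 */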
#ifdef RPC_4
		fd_set readfds;
		readfds = svc_fdset;
		FD_SET(fwd_sock, &readfds);
#else
#ifdef FD_SET
		fd_set readfds;
		FD_ZERO(&readfds);
		readfds.fds_bits[0] = svc_fds;
		FD_SET(fwd_sock, &readfds);
#else
		int readfds = svc_fds | (1 << fwd_sock);
#endif /* FD_SET */
#endif /* RPC_4 */

#ifdef DEBUG
		checkup();
#endif /* DEBUG */

		/*
		 * If the full timeout code is not called,
		 * then recompute the time delta manually.
		 */
		now = clocktime();

		if (next_softclock <= now) {
			if (amd_state == Finishing)
				umount_exported();
			tvv.tv_sec = softclock();
		} else {
			tvv.tv_sec = next_softclock - now;
		}
		tvv.tv_usec = 0;

		if (amd_state == Finishing && last_used_map < 0) {
			flush_mntfs();
			amd_state = Quit;
			break;
		}

#ifdef DEBUG
		if (tvv.tv_sec)
			dlog("Select waits for %ds", tvv.tv_sec);
		else
			dlog("Select waits for Godot");
#endif /* DEBUG */

		nsel = do_select(smask, dtbsz, &readfds, &tvv);

		switch (nsel) {
		case -1:
			if (errno == EINTR) {
#ifdef DEBUG
				dlog("select interrupted");
#endif /* DEBUG */
				continue;
			}
			perror("select");
			break;

		case 0:
#ifdef DEBUG
			/*dlog("select returned 0");*/
#endif /* DEBUG */
			break;

		default:
			/* Read all pending NFS responses at once to avoid
			   having responses queue up as a consequence of
			   retransmissions. */
#ifdef FD_SET
			if (FD_ISSET(fwd_sock, &readfds)) {
				FD_CLR(fwd_sock, &readfds);
#else
			if (readfds & (1 << fwd_sock)) {
				readfds &= ~(1 << fwd_sock);
#endif
				--nsel;
				do {
					fwd_reply();
				} while (rpc_pending_now() > 0);
			}

			if (nsel) {
				/*
				 * Anything left must be a normal
				 * RPC request.
				 */
#ifdef RPC_4
				svc_getreqset(&readfds);
#else
#ifdef FD_SET
				svc_getreq(readfds.fds_bits[0]);
#else
				svc_getreq(readfds);
#endif /* FD_SET */
#endif /* RPC_4 */
			}
			break;
		}
	}

	(void) sigsetmask(smask);

	if (amd_state == Quit)
		amd_state = Done;

	return amd_state;
}

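/*
 * Bind the UDP socket to a reserved port and record the
 * port number in nfs_port for use elsewhere in amd.
 */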
static int bindnfs_port(so)
int so;
{
	unsigned short port;
	int error = bind_resv_port(so, &port);
	if (error == 0)
		nfs_port = port;
	return error;
}

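/*
 * Remove any stale AMQ registration from the portmapper.
 */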
void unregister_amq(P_void)
{
#ifdef DEBUG
	Debug(D_AMQ)
#endif /* DEBUG */
	(void) pmap_unset(AMQ_PROGRAM, AMQ_VERSION);
}

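/*
 * Top-level startup: create and register the NFS and AMQ
 * services, start RPC forwarding, build the root automount
 * node, restart any old mounts, mount the top-level automount
 * points, tell the parent we are ready and then run the
 * dispatch loop until everything has been unmounted.
 */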
int mount_automounter(ppid)
int ppid;
{
	int so = socket(AF_INET, SOCK_DGRAM, 0);
	SVCXPRT *amqp;
	int nmount;

	if (so < 0 || bindnfs_port(so) < 0) {
		perror("Can't create privileged nfs port");
		return 1;
	}

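	/*
	 * Attach both the NFS and the AMQ transports to the
	 * same reserved UDP socket.
	 */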
	if ((nfsxprt = svcudp_create(so)) == NULL ||
			(amqp = svcudp_create(so)) == NULL) {
		plog(XLOG_FATAL, "cannot create rpc/udp service");
		return 2;
	}

	if (!svc_register(nfsxprt, NFS_PROGRAM, NFS_VERSION, nfs_program_2, 0)) {
		plog(XLOG_FATAL, "unable to register (NFS_PROGRAM, NFS_VERSION, 0)");
		return 3;
	}

	/*
	 * Start RPC forwarding
	 */
	if (fwd_init() != 0)
		return 3;

	/*
	 * One or other of so, fwd_sock
	 * must be the highest fd on
	 * which to select.
	 */
	if (so > max_fds)
		max_fds = so;
	if (fwd_sock > max_fds)
		max_fds = fwd_sock;

	/*
	 * Construct the root automount node
	 */
	make_root_node();

	/*
	 * Pick up the pieces from a previous run
	 * This is likely to (indirectly) need the rpc_fwd package
	 * so it *must* come after the call to fwd_init().
	 */
	if (restart_existing_mounts)
		restart();

	/*
	 * Mount the top-level auto-mountpoints
	 */
	nmount = mount_exported();

	/*
	 * Now safe to tell parent that we are up and running
	 */
	if (ppid)
		kill(ppid, SIGQUIT);

	if (nmount == 0) {
		plog(XLOG_FATAL, "No work to do - quitting");
		amd_state = Done;
		return 0;
	}

#ifdef DEBUG
	Debug(D_AMQ) {
#endif /* DEBUG */
	/*
	 * Register the amq service with the portmapper,
	 * first clearing any stale registration.
	 */
	unregister_amq();

	if (!svc_register(amqp, AMQ_PROGRAM, AMQ_VERSION, amq_program_1, IPPROTO_UDP)) {
		plog(XLOG_FATAL, "unable to register (AMQ_PROGRAM, AMQ_VERSION, udp)");
		return 3;
	}
#ifdef DEBUG
	}
#endif /* DEBUG */

	/*
	 * Start timeout_mp rolling
	 */
	reschedule_timeout_mp();

	/*
	 * Start the server
	 */
	if (run_rpc() != Done) {
		plog(XLOG_FATAL, "run_rpc failed");
		amd_state = Done;
	}

	return 0;
}
409