/*
 * Copyright (c) 1990 Jan-Simon Pendry
 * Copyright (c) 1990 Imperial College of Science, Technology & Medicine
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry at Imperial College, London.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)nfs_start.c	8.1 (Berkeley) 06/06/93
 *
 * $Id: nfs_start.c,v 5.2.2.1 1992/02/09 15:08:51 jsp beta $
 *
 */

#include "am.h"
#include "amq.h"
#include <sys/signal.h>
#include <setjmp.h>
extern jmp_buf select_intr;
extern int select_intr_valid;

#ifdef HAS_TFS
/*
 * Use replacement for RPC/UDP transport
 * so that we do NFS gatewaying.
 */
#define	svcudp_create svcudp2_create
extern SVCXPRT *svcudp2_create P((int));
#endif /* HAS_TFS */

extern void nfs_program_2();
extern void amq_program_1();

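/*
 * Reserved port and RPC transport handle for amd's NFS service;
 * both are set up in mount_automounter() below.
 */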
unsigned short nfs_port;
SVCXPRT *nfsxprt;

extern int fwd_sock;
int max_fds = -1;

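/*
 * Signals that stay blocked while the automounter is running;
 * do_select() unblocks them only while it waits in select().
 */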
#define MASKED_SIGS	(sigmask(SIGINT)|sigmask(SIGTERM)|sigmask(SIGCHLD)|sigmask(SIGHUP))

#ifdef DEBUG
/*
 * Check that we are not burning resources
 */
static void checkup(P_void)
{

	static int max_fd = 0;
	static char *max_mem = 0;

	int next_fd = dup(0);
	extern caddr_t sbrk P((int));
	caddr_t next_mem = sbrk(0);
	close(next_fd);

	/*if (max_fd < 0) {
		max_fd = next_fd;
	} else*/ if (max_fd < next_fd) {
		dlog("%d new fds allocated; total is %d",
			next_fd - max_fd, next_fd);
		max_fd = next_fd;
	}

	/*if (max_mem == 0) {
		max_mem = next_mem;
	} else*/ if (max_mem < next_mem) {
		dlog("%#x bytes of memory allocated; total is %#x (%d pages)",
			next_mem - max_mem,
			next_mem,
			((int)next_mem+getpagesize()-1)/getpagesize());
		max_mem = next_mem;
	}
}
#endif /* DEBUG */

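/*
 * Wrapper around select().  Signals are unblocked only while we
 * are actually waiting; a signal caught in the meantime longjmps
 * back through select_intr, and SIGINT/SIGTERM push the
 * automounter into the Finishing state.  The mount map cache is
 * also reloaded from here once an hour.
 */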
static int do_select(smask, fds, fdp, tvp)
int smask;
int fds;
int *fdp;
struct timeval *tvp;
{
	int sig;
	int nsel;
	if (sig = setjmp(select_intr)) {
		select_intr_valid = 0;
		/* Got a signal */
		switch (sig) {
		case SIGINT:
		case SIGTERM:
			amd_state = Finishing;
			reschedule_timeout_mp();
			break;
		}
		nsel = -1;
		errno = EINTR;
	} else {
		select_intr_valid = 1;
		/*
		 * Invalidate the current clock value
		 */
		clock_valid = 0;
		/*
		 * Allow interrupts.  If a signal
		 * occurs, then it will cause a longjmp
		 * up above.
		 */
		(void) sigsetmask(smask);
		/*
		 * Wait for input
		 */
		nsel = select(fds, fdp, (int *) 0, (int *) 0,
				tvp->tv_sec ? tvp : (struct timeval *) 0);

	}

	(void) sigblock(MASKED_SIGS);

	/*
	 * Perhaps reload the cache?
	 */
	if (do_mapc_reload < clocktime()) {
		mapc_reload();
		do_mapc_reload = clocktime() + ONE_HOUR;
	}
	return nsel;
}

/*
 * Determine whether anything is left in
 * the RPC input queue.
 */
static int rpc_pending_now()
{
	struct timeval tvv;
	int nsel;
#ifdef FD_SET
	fd_set readfds;

	FD_ZERO(&readfds);
	FD_SET(fwd_sock, &readfds);
#else
	int readfds = (1 << fwd_sock);
#endif /* FD_SET */

	tvv.tv_sec = tvv.tv_usec = 0;
	nsel = select(max_fds+1, &readfds, (int *) 0, (int *) 0, &tvv);
	if (nsel < 1)
		return(0);
#ifdef FD_SET
	if (FD_ISSET(fwd_sock, &readfds))
		return(1);
#else
	if (readfds & (1 << fwd_sock))
		return(1);
#endif
	return(0);
}

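/*
 * Main service loop.  Dispatch forwarded NFS replies and incoming
 * RPC requests until everything has been unmounted and the state
 * machine reaches Quit, then return the final state (Done on a
 * clean shutdown).
 */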
static serv_state run_rpc(P_void)
{
	int dtbsz = max_fds + 1;
	int smask = sigblock(MASKED_SIGS);

	next_softclock = clocktime();

	amd_state = Run;

	/*
	 * Keep on trucking while we are in Run mode.  This state
	 * is switched to Quit after all the file systems have
	 * been unmounted.
	 */
	while ((int)amd_state <= (int)Finishing) {
		struct timeval tvv;
		int nsel;
		time_t now;
#ifdef RPC_4
		fd_set readfds;
		readfds = svc_fdset;
		FD_SET(fwd_sock, &readfds);
#else
#ifdef FD_SET
		fd_set readfds;
		FD_ZERO(&readfds);
		readfds.fds_bits[0] = svc_fds;
		FD_SET(fwd_sock, &readfds);
#else
		int readfds = svc_fds | (1 << fwd_sock);
#endif /* FD_SET */
#endif /* RPC_4 */

#ifdef DEBUG
		checkup();
#endif /* DEBUG */

		/*
		 * If the full timeout code is not called,
		 * then recompute the time delta manually.
		 */
		now = clocktime();

		if (next_softclock <= now) {
			if (amd_state == Finishing)
				umount_exported();
			tvv.tv_sec = softclock();
		} else {
			tvv.tv_sec = next_softclock - now;
		}
		tvv.tv_usec = 0;

		if (amd_state == Finishing && last_used_map < 0) {
			flush_mntfs();
			amd_state = Quit;
			break;
		}

#ifdef DEBUG
		if (tvv.tv_sec)
			dlog("Select waits for %ds", tvv.tv_sec);
		else
			dlog("Select waits for Godot");
#endif /* DEBUG */

		nsel = do_select(smask, dtbsz, &readfds, &tvv);

		switch (nsel) {
		case -1:
			if (errno == EINTR) {
#ifdef DEBUG
				dlog("select interrupted");
#endif /* DEBUG */
				continue;
			}
			perror("select");
			break;

		case 0:
#ifdef DEBUG
			/*dlog("select returned 0");*/
#endif /* DEBUG */
			break;

		default:
			/* Read all pending NFS responses at once to avoid
			   having responses queue up as a consequence of
			   retransmissions. */
#ifdef FD_SET
			if (FD_ISSET(fwd_sock, &readfds)) {
				FD_CLR(fwd_sock, &readfds);
#else
			if (readfds & (1 << fwd_sock)) {
				readfds &= ~(1 << fwd_sock);
#endif
				--nsel;
				do {
					fwd_reply();
				} while (rpc_pending_now() > 0);
			}

			if (nsel) {
				/*
				 * Anything left must be a normal
				 * RPC request.
				 */
#ifdef RPC_4
				svc_getreqset(&readfds);
#else
#ifdef FD_SET
				svc_getreq(readfds.fds_bits[0]);
#else
				svc_getreq(readfds);
#endif /* FD_SET */
#endif /* RPC_4 */
			}
			break;
		}
	}

	(void) sigsetmask(smask);

	if (amd_state == Quit)
		amd_state = Done;

	return amd_state;
}

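/*
 * Bind the NFS service socket to a reserved port
 * and record the port number in nfs_port.
 */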
static int bindnfs_port(so)
int so;
{
	unsigned short port;
	int error = bind_resv_port(so, &port);
	if (error == 0)
		nfs_port = port;
	return error;
}

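/*
 * Remove any existing portmapper registration
 * for the amq service.
 */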
void unregister_amq(P_void)
{
#ifdef DEBUG
	Debug(D_AMQ)
#endif /* DEBUG */
	(void) pmap_unset(AMQ_PROGRAM, AMQ_VERSION);
}

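/*
 * Start the automounter: create the NFS and amq services on a
 * privileged UDP socket, register them with the RPC layer, start
 * RPC forwarding, mount the top-level automount points, tell the
 * parent process we are up, and then run the service loop.
 */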
int mount_automounter(ppid)
int ppid;
{
	int so = socket(AF_INET, SOCK_DGRAM, 0);
	SVCXPRT *amqp;
	int nmount;

	if (so < 0 || bindnfs_port(so) < 0) {
		perror("Can't create privileged nfs port");
		return 1;
	}

	if ((nfsxprt = svcudp_create(so)) == NULL ||
			(amqp = svcudp_create(so)) == NULL) {
		plog(XLOG_FATAL, "cannot create rpc/udp service");
		return 2;
	}

	if (!svc_register(nfsxprt, NFS_PROGRAM, NFS_VERSION, nfs_program_2, 0)) {
		plog(XLOG_FATAL, "unable to register (NFS_PROGRAM, NFS_VERSION, 0)");
		return 3;
	}

	/*
	 * Start RPC forwarding
	 */
	if (fwd_init() != 0)
		return 3;

	/*
	 * Whichever of so and fwd_sock is larger is the
	 * highest fd on which we need to select.
	 */
	if (so > max_fds)
		max_fds = so;
	if (fwd_sock > max_fds)
		max_fds = fwd_sock;

	/*
	 * Construct the root automount node
	 */
	make_root_node();

	/*
	 * Pick up the pieces from a previous run.
	 * This is likely to (indirectly) need the rpc_fwd package
	 * so it *must* come after the call to fwd_init().
	 */
	if (restart_existing_mounts)
		restart();

	/*
	 * Mount the top-level auto-mountpoints
	 */
	nmount = mount_exported();

	/*
	 * Now safe to tell parent that we are up and running
	 */
	if (ppid)
		kill(ppid, SIGQUIT);

	if (nmount == 0) {
		plog(XLOG_FATAL, "No work to do - quitting");
		amd_state = Done;
		return 0;
	}

#ifdef DEBUG
	Debug(D_AMQ) {
#endif /* DEBUG */
	/*
	 * Register the amq service, clearing any
	 * stale registration first
	 */
	unregister_amq();

	if (!svc_register(amqp, AMQ_PROGRAM, AMQ_VERSION, amq_program_1, IPPROTO_UDP)) {
		plog(XLOG_FATAL, "unable to register (AMQ_PROGRAM, AMQ_VERSION, udp)");
		return 3;
	}
#ifdef DEBUG
	}
#endif /* DEBUG */

	/*
	 * Start timeout_mp rolling
	 */
	reschedule_timeout_mp();

	/*
	 * Start the server
	 */
	if (run_rpc() != Done) {
		plog(XLOG_FATAL, "run_rpc failed");
		amd_state = Done;
	}

	return 0;
}