xref: /original-bsd/sys/tahoe/tahoe/machdep.c (revision 2789846d)
/*
 * Copyright (c) 1982,1987,1988 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)machdep.c	7.7 (Berkeley) 10/24/89
 */

#include "param.h"
#include "systm.h"
#include "user.h"
#include "kernel.h"
#include "map.h"
#include "vm.h"
#include "proc.h"
#include "buf.h"
#include "reboot.h"
#include "conf.h"
#include "vnode.h"
#include "file.h"
#include "text.h"
#include "clist.h"
#include "callout.h"
#include "cmap.h"
#include "malloc.h"
#include "mbuf.h"
#include "msgbuf.h"
#include "../ufs/quota.h"

#include "cpu.h"
#include "reg.h"
#include "pte.h"
#include "psl.h"
#include "mem.h"
#include "mtpr.h"
#include "cp.h"

#include "../tahoevba/vbavar.h"

/*
 * Declare these as initialized data so we can patch them.
 */
int	nswbuf = 0;
#ifdef	NBUF
int	nbuf = NBUF;
#else
int	nbuf = 0;
#endif
#ifdef	BUFPAGES
int	bufpages = BUFPAGES;
#else
int	bufpages = 0;
#endif
#include "yc.h"
#if NCY > 0
#include "../tahoevba/cyreg.h"
#endif
int	msgbufmapped;		/* set when safe to use msgbuf */
int	physmem = MAXMEM;	/* max supported memory, changes to actual */

/*
 * Machine-dependent startup code
 */
startup(firstaddr)
	int firstaddr;
{
	register int unixsize;
	register unsigned i;
	register struct pte *pte;
	int mapaddr, j;
	register caddr_t v;
	int maxbufs, base, residual;

	/*
	 * Initialize error message buffer (at end of core).
	 */
	maxmem = physmem - btoc(sizeof (struct msgbuf));
	pte = msgbufmap;
	for (i = 1; i < btoc(sizeof (struct msgbuf)) + 1; i++)
		*(int *)pte++ = PG_V | PG_KW | (physmem - i);
	mtpr(TBIA, 1);
	msgbufmapped = 1;
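	/*
	 * The message buffer lives in the last pages of physical memory;
	 * the loop above points the msgbufmap ptes at those pages (working
	 * down from the final page), which is why maxmem was reduced by
	 * btoc(sizeof (struct msgbuf)).  The TBIA flushes any stale
	 * translations before the buffer is first used.
	 */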
#ifdef KADB
	kdb_init();			/* startup kernel debugger */
#endif
	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	printf("real mem = %d\n", ctob(physmem));

	/*
	 * Allocate space for system data structures.
	 * The first available real memory address is in "firstaddr".
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 * An index into the kernel page table corresponding to the
	 * virtual memory address maintained in "v" is kept in "mapaddr".
	 */
	v = (caddr_t)(0xc0000000 | (firstaddr * NBPG));
#define	valloc(name, type, num) \
	    (name) = (type *)v; v = (caddr_t)((name)+(num))
#define	valloclim(name, type, num, lim) \
	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
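	/*
	 * valloc and valloclim simply carve arrays out of kernel virtual
	 * address space starting at "v"; no physical pages are assigned
	 * here.  The space is mapped and cleared further below once the
	 * total size ("unixsize") is known.  valloclim also records the
	 * address just past the array (e.g. procNPROC) for code that
	 * walks the table.
	 */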
#if NCY > 0
	/*
	 * Allocate raw buffers for tapemaster controllers
	 * first, as they need buffers in the first megabyte.
	 */
	valloc(cybuf, char, NCY * CYMAXIO);
#endif
	valloclim(vnode, struct vnode, nvnode, vnodeNVNODE);
	valloclim(file, struct file, nfile, fileNFILE);
	valloclim(proc, struct proc, nproc, procNPROC);
	valloclim(text, struct text, ntext, textNTEXT);
	valloc(cfree, struct cblock, nclist);
	valloc(callout, struct callout, ncallout);
	valloc(swapmap, struct map, nswapmap = nproc * 2);
	valloc(argmap, struct map, ARGMAPSIZE);
	valloc(kernelmap, struct map, nproc);
	valloc(mbmap, struct map, nmbclusters/4);
	valloc(namecache, struct namecache, nchsize);
	valloc(kmemmap, struct map, ekmempt - kmempt);
	valloc(kmemusage, struct kmemusage, ekmempt - kmempt);
#ifdef QUOTA
	valloclim(quota, struct quota, nquota, quotaNQUOTA);
	valloclim(dquot, struct dquot, ndquot, dquotNDQUOT);
#endif

	/*
	 * Determine how many buffers to allocate.
	 * Use 10% of memory for the first 2 Meg, 5% of the remaining
	 * memory. Insure a minimum of 16 buffers.
	 * We allocate 1/2 as many swap buffer headers as file i/o buffers.
	 */
	if (bufpages == 0)
		if (physmem < (2 * 1024 * 1024))
			bufpages = physmem / 10 / CLSIZE;
		else
			bufpages = ((2 * 1024 * 1024 + physmem) / 20) / CLSIZE;
	if (nbuf == 0) {
		nbuf = bufpages / 2;
		if (nbuf < 16)
			nbuf = 16;
	}
	if (nswbuf == 0) {
		nswbuf = (nbuf / 2) &~ 1;	/* force even */
		if (nswbuf > 256)
			nswbuf = 256;		/* sanity */
	}
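	/*
	 * To summarize the policy above: bufpages is the number of
	 * clusters of buffer memory (roughly 10% of a small machine,
	 * 5% plus a constant on a larger one), nbuf is the number of
	 * buffer headers (half of bufpages, never fewer than 16), and
	 * nswbuf is the number of swap I/O headers (half of nbuf,
	 * forced even and capped at 256).  Any of the three may be
	 * preset by patching the variables declared at the top of
	 * this file.
	 */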
	valloc(swbuf, struct buf, nswbuf);

	/*
	 * Now the amount of virtual memory remaining for buffers
	 * can be calculated, estimating needs for the cmap.
	 */
	ncmap = (maxmem*NBPG - ((int)v &~ 0xc0000000)) /
		(CLBYTES + sizeof(struct cmap)) + 2;
	maxbufs = ((SYSPTSIZE * NBPG) -
	    ((int)(v + ncmap * sizeof(struct cmap)) - 0xc0000000)) /
		(MAXBSIZE + sizeof(struct buf));
	if (maxbufs < 16)
		panic("sys pt too small");
	if (nbuf > maxbufs) {
		printf("SYSPTSIZE limits number of buffers to %d\n", maxbufs);
		nbuf = maxbufs;
	}
	if (bufpages > nbuf * (MAXBSIZE / CLBYTES))
		bufpages = nbuf * (MAXBSIZE / CLBYTES);
	valloc(buf, struct buf, nbuf);

	/*
	 * Allocate space for core map.
	 * Allow space for all of physical memory minus the amount
	 * dedicated to the system. The amount of physical memory
	 * dedicated to the system is the total virtual memory of
	 * the system thus far, plus core map, buffer pages,
	 * and buffer headers not yet allocated.
	 * Add 2: 1 because the 0th entry is unused, 1 for rounding.
	 */
	ncmap = (maxmem*NBPG - ((int)(v + bufpages*CLBYTES) &~ 0xc0000000)) /
		(CLBYTES + sizeof(struct cmap)) + 2;
	valloclim(cmap, struct cmap, ncmap, ecmap);

	/*
	 * Clear space allocated thus far, and make r/w entries
	 * for the space in the kernel map.
	 */
	unixsize = btoc((int)v &~ 0xc0000000);
	while (firstaddr < unixsize) {
		*(int *)(&Sysmap[firstaddr]) = PG_V | PG_KW | firstaddr;
		clearseg((unsigned)firstaddr);
		firstaddr++;
	}

	/*
	 * Now allocate buffers proper.  They are different than the above
	 * in that they usually occupy more virtual memory than physical.
	 */
	v = (caddr_t) ((int)(v + PGOFSET) &~ PGOFSET);
	valloc(buffers, char, MAXBSIZE * nbuf);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	mapaddr = firstaddr;
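	/*
	 * Each buffer header gets a fixed MAXBSIZE window of kernel
	 * virtual memory, but on average only "base" clusters of
	 * physical memory; the first "residual" buffers receive one
	 * extra cluster so that all bufpages clusters are handed out.
	 * The loops below fill in the page table entries and clear
	 * the pages.
	 */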
	for (i = 0; i < residual; i++) {
		for (j = 0; j < (base + 1) * CLSIZE; j++) {
			*(int *)(&Sysmap[mapaddr+j]) = PG_V | PG_KW | firstaddr;
			clearseg((unsigned)firstaddr);
			firstaddr++;
		}
		mapaddr += MAXBSIZE / NBPG;
	}
	for (i = residual; i < nbuf; i++) {
		for (j = 0; j < base * CLSIZE; j++) {
			*(int *)(&Sysmap[mapaddr+j]) = PG_V | PG_KW | firstaddr;
			clearseg((unsigned)firstaddr);
			firstaddr++;
		}
		mapaddr += MAXBSIZE / NBPG;
	}

	unixsize = btoc((int)v &~ 0xc0000000);
	if (firstaddr >= physmem - 8*UPAGES)
		panic("no memory");
	mtpr(TBIA, 1);			/* After we just cleared it all! */

	/*
	 * Initialize callouts
	 */
	callfree = callout;
	for (i = 1; i < ncallout; i++)
		callout[i-1].c_next = &callout[i];
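	/*
	 * The last entry's c_next is already zero from the page
	 * clearing above, so it terminates the free list.
	 */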

	/*
	 * Initialize memory allocator and swap
	 * and user page table maps.
	 *
	 * THE USER PAGE TABLE MAP IS CALLED ``kernelmap''
	 * WHICH IS A VERY UNDESCRIPTIVE AND INCONSISTENT NAME.
	 */
	meminit(firstaddr, maxmem);
	maxmem = freemem;
	printf("avail mem = %d\n", ctob(maxmem));
	printf("using %d buffers containing %d bytes of memory\n",
		nbuf, bufpages * CLBYTES);
	rminit(kernelmap, (long)USRPTSIZE, (long)1,
	    "usrpt", nproc);
	rminit(mbmap, (long)(nmbclusters * CLSIZE), (long)CLSIZE,
	    "mbclusters", nmbclusters/4);
	kmeminit();		/* now safe to do malloc/free */
	intenable = 1;		/* Enable interrupts from now on */

	/*
	 * Set up CPU-specific registers, cache, etc.
	 */
	initcpu();

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bhinit();
	binit();

	/*
	 * Configure the system.
	 */
	configure();
}

#ifdef PGINPROF
/*
 * Return the difference (in microseconds)
 * between the current time and a previous
 * time as represented by the arguments.
 * If there is a pending clock interrupt
 * which has not been serviced due to high
 * ipl, return error code.
 */
/*ARGSUSED*/
vmtime(otime, olbolt, oicr)
	register int otime, olbolt, oicr;
{

	return (((time.tv_sec-otime)*60 + lbolt-olbolt)*16667);
}
#endif

/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * in u. to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig, mask;
	unsigned code;
{
	register struct sigcontext *scp;
	register struct proc *p = u.u_procp;
	register int *regs;
	register struct sigframe {
		int	sf_signum;
		int	sf_code;
		struct	sigcontext *sf_scp;
		sig_t	sf_handler;
		int	sf_regs[6];		/* r0-r5 */
		struct	sigcontext *sf_scpcopy;
	} *fp;
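	/*
	 * Layout of the frame pushed on the user's stack: sf_signum,
	 * sf_code and sf_scp are the handler's arguments, while sf_regs
	 * and sf_scpcopy are presumably used by the sigcode trampoline
	 * (u.u_pcb.pcb_sigc) to save r0-r5 and to pass the context back
	 * to sigreturn once the handler returns.
	 */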
	int oonstack;

	regs = u.u_ar0;
	oonstack = u.u_onstack;
	/*
	 * Allocate and validate space for the signal handler
	 * context. Note that if the stack is in P0 space, the
	 * call to grow() is a nop, and the useracc() check
	 * will fail if the process has not already allocated
	 * the space with a `brk'.
	 */
	if (!u.u_onstack && (u.u_sigonstack & sigmask(sig))) {
		scp = (struct sigcontext *)u.u_sigsp - 1;
		u.u_onstack = 1;
	} else
		scp = (struct sigcontext *)regs[SP] - 1;
	fp = (struct sigframe *)scp - 1;
	if ((int)fp <= USRSTACK - ctob(u.u_ssize))
		(void) grow((unsigned)fp);
	if (useracc((caddr_t)fp, sizeof (*fp) + sizeof (*scp), B_WRITE) == 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		SIGACTION(p, SIGILL) = SIG_DFL;
		sig = sigmask(SIGILL);
		p->p_sigignore &= ~sig;
		p->p_sigcatch &= ~sig;
		p->p_sigmask &= ~sig;
		psignal(p, SIGILL);
		return;
	}
	/*
	 * Build the argument list for the signal handler.
	 */
	fp->sf_signum = sig;
	fp->sf_code = code;
	fp->sf_scp = scp;
	fp->sf_handler = catcher;
	/*
	 * Build the callf argument frame to be used to call sigreturn.
	 */
	fp->sf_scpcopy = scp;
	/*
	 * Build the signal context to be used by sigreturn.
	 */
	scp->sc_onstack = oonstack;
	scp->sc_mask = mask;
	scp->sc_sp = regs[SP];
	scp->sc_fp = regs[FP];
	scp->sc_pc = regs[PC];
	scp->sc_ps = regs[PS];
	regs[SP] = (int)fp;
	regs[PC] = (int)u.u_pcb.pcb_sigc;
}

/*
 * System call to clean up state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * psl to gain improper privileges or to cause
 * a machine fault.
 */
sigreturn()
{
	struct a {
		struct sigcontext *sigcntxp;
	};
	register struct sigcontext *scp;
	register int *regs = u.u_ar0;

	scp = ((struct a *)(u.u_ap))->sigcntxp;
	if (useracc((caddr_t)scp, sizeof (*scp), 0) == 0) {
		u.u_error = EINVAL;
		return;
	}
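	/*
	 * Refuse any context whose saved psl could elevate privilege:
	 * the must-be-zero bits, the interrupt priority field and the
	 * interrupt-stack bit must all be clear, and the previous- and
	 * current-mode bits must both remain set (user mode).
	 */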
	if ((scp->sc_ps & (PSL_MBZ|PSL_IPL|PSL_IS)) != 0 ||
	    (scp->sc_ps & (PSL_PRVMOD|PSL_CURMOD)) != (PSL_PRVMOD|PSL_CURMOD)) {
		u.u_error = EINVAL;
		return;
	}
	u.u_eosys = JUSTRETURN;
	u.u_onstack = scp->sc_onstack & 01;
	u.u_procp->p_sigmask = scp->sc_mask &~ sigcantmask;
	regs[FP] = scp->sc_fp;
	regs[SP] = scp->sc_sp;
	regs[PC] = scp->sc_pc;
	regs[PS] = scp->sc_ps;
}

int	waittime = -1;

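/*
 * Reboot or halt the machine.  Unless RB_NOSYNC is set (or a previous
 * attempt already got this far), sync the disks and wait for busy
 * buffers to drain.  RB_HALT then spins at high ipl; RB_DUMP takes a
 * crash dump (which reboots through the console processor itself);
 * otherwise the console processor is asked to reboot via tocons(CPBOOT).
 */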
boot(arghowto)
	int arghowto;
{
	register long dummy;		/* r12 is reserved */
	register int howto;		/* r11 == how to boot */
	register int devtype;		/* r10 == major of root dev */
	extern char *panicstr;

	howto = arghowto;
	if ((howto&RB_NOSYNC) == 0 && waittime < 0 && bfreelist[0].b_forw) {
		register struct buf *bp;
		int iter, nbusy;

		waittime = 0;
		(void) splnet();
		printf("syncing disks... ");
		/*
		 * Release inodes held by texts before update.
		 */
		if (panicstr == 0)
			xumount(NULL);
		sync();

		for (iter = 0; iter < 20; iter++) {
			nbusy = 0;
			for (bp = &buf[nbuf]; --bp >= buf; )
				if ((bp->b_flags & (B_BUSY|B_INVAL)) == B_BUSY)
					nbusy++;
			if (nbusy == 0)
				break;
			printf("%d ", nbusy);
			DELAY(40000 * iter);
		}
		if (nbusy)
			printf("giving up\n");
		else
			printf("done\n");
		DELAY(10000);			/* wait for printf to finish */
	}
	mtpr(IPL, 0x1f);			/* extreme priority */
	devtype = major(rootdev);
	*(int *)CPBFLG = howto;
	if (howto&RB_HALT) {
		printf("halting (in tight loop); hit ~h\n\n");
		mtpr(IPL, 0x1f);
		for (;;)
			;
	} else {
		if (howto & RB_DUMP) {
			doadump();		/* CPBOOTs itself */
			/*NOTREACHED*/
		}
		tocons(CPBOOT);
	}
#ifdef lint
	dummy = 0; dummy = dummy;
	printf("howto %d, devtype %d\n", arghowto, devtype);
#endif
	for (;;)
		asm("halt");
	/*NOTREACHED*/
}

struct	cpdcb_o cpcontrol;

/*
 * Send the given command ('c') to the console processor.
 * Assumed to be one of the last things the OS does before
 *  halting or rebooting.
 */
tocons(c)
{
	register timeout;

	cpcontrol.cp_hdr.cp_unit = CPUNIT;
	cpcontrol.cp_hdr.cp_comm =  (char)c;
	if (c != CPBOOT)
		cpcontrol.cp_hdr.cp_count = 1;	/* Just for sanity */
	else {
		cpcontrol.cp_hdr.cp_count = 4;
		*(int *)cpcontrol.cp_buf = 0;	/* r11 value for reboot */
	}
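	/*
	 * Wait (bounded by a simple count-down) for the console processor
	 * to mark its previous command done; cp_unit is read through
	 * uncache(), presumably so the poll sees the CP's update rather
	 * than stale cached data.  Then hand the CP the physical address
	 * of the command block via CPMDCB, whether or not it acknowledged.
	 */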
	timeout = 100000;				/* Delay loop */
	while (timeout-- && (cnlast->cp_unit&CPDONE) == 0)
		uncache(&cnlast->cp_unit);
	/* give up, force it to listen */
	mtpr(CPMDCB, vtoph((struct proc *)0, (unsigned)&cpcontrol));
}

#if CLSIZE != 1
/*
 * Invalidate all pte's in a single cluster.
 */
tbiscl(v)
	unsigned v;
{
	register caddr_t addr;		/* must be first reg var */
	register int i;

	addr = ptob(v);
	for (i = 0; i < CLSIZE; i++) {
		mtpr(TBIS, addr);
		addr += NBPG;
	}
}
#endif

int	dumpmag = 0x8fca0101;	/* magic number for savecore */
int	dumpsize = 0;		/* also for savecore */

dumpconf()
{
	int nblks;

	dumpsize = physmem;
	if (dumpdev != NODEV && bdevsw[major(dumpdev)].d_psize) {
		nblks = (*bdevsw[major(dumpdev)].d_psize)(dumpdev);
		if (dumpsize > btoc(dbtob(nblks - dumplo)))
			dumpsize = btoc(dbtob(nblks - dumplo));
		else if (dumplo == 0)
			dumplo = nblks - btodb(ctob(physmem));
	}
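	/*
	 * dumplo is in disk blocks and dumpsize in pages: the code above
	 * clips the dump to what fits on the device past dumplo, or, if
	 * dumplo was never set, places the dump at the end of the
	 * partition.
	 */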
	/*
	 * Don't dump on the first CLSIZE pages,
	 * in case the dump device includes a disk label.
	 */
	if (dumplo < CLSIZE)
		dumplo = CLSIZE;
}

/*
 * Doadump comes here after turning off memory management and
 * getting on the dump stack, either when called above, or by
 * the auto-restart code.
 */
dumpsys()
{

	if (dumpdev == NODEV)
		return;
	/*
	 * For dumps during autoconfiguration,
	 * if the dump device has already been configured...
	 */
	if (dumpsize == 0)
		dumpconf();
	if (dumplo < 0)
		return;
	printf("\ndumping to dev %x, offset %d\n", dumpdev, dumplo);
	printf("dump ");
	switch ((*bdevsw[major(dumpdev)].d_dump)(dumpdev)) {

	case ENXIO:
		printf("device bad\n");
		break;

	case EFAULT:
		printf("device not ready\n");
		break;

	case EINVAL:
		printf("area improper\n");
		break;

	case EIO:
		printf("i/o error\n");
		break;

	default:
		printf("succeeded\n");
		break;
	}
	printf("\n\n");
	DELAY(1000);
	tocons(CPBOOT);
}

/*
 * Bus error 'recovery' code.
 * Print out the bus error frame and then give up.
 * (More information from special registers can be printed here.)
 */

/*
 * Frame for bus error
 */
struct buserframe {
	int	which_bus;		/* primary or secondary */
	int	memerreg;		/* memory error register */
	int	trp_pc;			/* trapped pc */
	int	trp_psl;		/* trapped psl */
};

char	*mem_errcd[8] = {
	"Unknown error code 0",
	"Address parity error",		/* APE */
	"Data parity error",		/* DPE */
	"Data check error",		/* DCE */
	"Versabus timeout",		/* VTO */
	"Versabus error",		/* VBE */
	"Non-existent memory",		/* NEM */
	"Unknown error code 7",
};

buserror(v)
	caddr_t v;
{
	register struct buserframe *busef = (struct buserframe *)v;
	register long reg;

	printf("bus error, address %x, psl %x\n",
	    busef->trp_pc, busef->trp_psl);
	reg =  busef->memerreg;
	printf("mear %x %s\n",
	    ((reg&MEAR)>>16)&0xffff, mem_errcd[reg & ERRCD]);
	if (reg&AXE)
		printf("adapter external error\n");
	printf("error master: %s\n", reg&ERM ? "versabus" : "tahoe");
	if (reg&IVV)
		printf("illegal interrupt vector from ipl %d\n", (reg>>2)&7);
	reg = busef->which_bus;
	printf("mcbr %x versabus type %x\n",
	    ((reg&MCBR)>>16)&0xffff, reg & 0xffc3);
	if ((busef->memerreg&IVV) == 0)
		panic("buserror");
}

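/*
 * Return the current time of day, advanced by one clock tick and
 * normalized so tv_usec does not exceed one second.  The copy of
 * "time" is made at splhigh so both words are read consistently.
 */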
microtime(tvp)
	register struct timeval *tvp;
{
	int s = splhigh();

	*tvp = time;
	tvp->tv_usec += tick;
	while (tvp->tv_usec > 1000000) {
		tvp->tv_sec++;
		tvp->tv_usec -= 1000000;
	}
	splx(s);
}

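/*
 * Give process 0 the reserved code and data cache keys (MAXCKEY,
 * MAXDKEY) and mark them allocated; the ckey/dkey arrays apparently
 * track which cache keys are in use and by how many processes.
 */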
initcpu()
{
	register struct proc *p;

	p = &proc[0];
#define	initkey(which, p, index) \
    which/**/_cache[index] = 1, which/**/_cnt[index] = 1; \
    p->p_/**/which = index;
	initkey(ckey, p, MAXCKEY);
	initkey(dkey, p, MAXDKEY);
}

/*
 * Clear registers on exec
 */
setregs(entry)
	u_long entry;
{

#ifdef notdef
	/* should pass args to init on the stack */
	for (rp = &u.u_ar0[0]; rp < &u.u_ar0[16];)
		*rp++ = 0;
#endif
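	/*
	 * FP is cleared so the new image starts with an empty frame-pointer
	 * chain; the PC is started two bytes past the entry point,
	 * presumably skipping the procedure's entry mask word.
	 */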
	u.u_ar0[FP] = 0;	/* bottom of the fp chain */
	u.u_ar0[PC] = entry + 2;
}
679