xref: /openbsd/sys/kern/kern_time.c (revision 610f49f8)
1 /*	$OpenBSD: kern_time.c,v 1.27 2002/02/15 18:51:20 pvalchev Exp $	*/
2 /*	$NetBSD: kern_time.c,v 1.20 1996/02/18 11:57:06 fvdl Exp $	*/
3 
4 /*
5  * Copyright (c) 1982, 1986, 1989, 1993
6  *	The Regents of the University of California.  All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *	This product includes software developed by the University of
19  *	California, Berkeley and its contributors.
20  * 4. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)kern_time.c	8.4 (Berkeley) 5/26/95
37  */
38 
39 #include <sys/param.h>
40 #include <sys/resourcevar.h>
41 #include <sys/kernel.h>
42 #include <sys/systm.h>
43 #include <sys/proc.h>
44 #include <sys/vnode.h>
45 #include <sys/signalvar.h>
46 
47 #include <sys/mount.h>
48 #include <sys/syscallargs.h>
49 
50 #if defined(NFSCLIENT) || defined(NFSSERVER)
51 #include <nfs/rpcv2.h>
52 #include <nfs/nfsproto.h>
53 #include <nfs/nfs_var.h>
54 #endif
55 
56 #include <machine/cpu.h>
57 
58 void	settime __P((struct timeval *));
59 void	itimerround __P((struct timeval *));
60 
61 /*
62  * Time of day and interval timer support.
63  *
64  * These routines provide the kernel entry points to get and set
65  * the time-of-day and per-process interval timers.  Subroutines
66  * here provide support for adding and subtracting timeval structures
67  * and decrementing interval timers, optionally reloading the interval
68  * timers when they expire.
69  */
70 
/* This function is used by clock_settime and settimeofday */
/*
 * Step the wall clock to the absolute time *tv and shift the derived
 * clocks by the same amount.  Runs at splclock() so hardclock() cannot
 * tick while "time" is being replaced.
 */
void
settime(tv)
	struct timeval *tv;
{
	struct timeval delta;
	int s;

	/* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */
	s = splclock();
	/* delta = how far the clock moves; may be negative. */
	timersub(tv, &time, &delta);
	time = *tv;
	(void) spllowersoftclock();
	/*
	 * Move boottime and runtime by the same delta so intervals
	 * computed against them remain consistent with the new clock.
	 */
	timeradd(&boottime, &delta, &boottime);
	timeradd(&runtime, &delta, &runtime);
	splx(s);
	/* Propagate the new time to the hardware time-of-day clock. */
	resettodr();
}
89 
90 /* ARGSUSED */
91 int
92 sys_clock_gettime(p, v, retval)
93 	struct proc *p;
94 	void *v;
95 	register_t *retval;
96 {
97 	register struct sys_clock_gettime_args /* {
98 		syscallarg(clockid_t) clock_id;
99 		syscallarg(struct timespec *) tp;
100 	} */ *uap = v;
101 	clockid_t clock_id;
102 	struct timeval atv;
103 	struct timespec ats;
104 
105 	clock_id = SCARG(uap, clock_id);
106 	if (clock_id != CLOCK_REALTIME)
107 		return (EINVAL);
108 
109 	microtime(&atv);
110 	TIMEVAL_TO_TIMESPEC(&atv,&ats);
111 
112 	return copyout(&ats, SCARG(uap, tp), sizeof(ats));
113 }
114 
115 /* ARGSUSED */
116 int
117 sys_clock_settime(p, v, retval)
118 	struct proc *p;
119 	void *v;
120 	register_t *retval;
121 {
122 	register struct sys_clock_settime_args /* {
123 		syscallarg(clockid_t) clock_id;
124 		syscallarg(const struct timespec *) tp;
125 	} */ *uap = v;
126 	clockid_t clock_id;
127 	struct timeval atv;
128 	struct timespec ats;
129 	int error;
130 
131 	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
132 		return (error);
133 
134 	clock_id = SCARG(uap, clock_id);
135 	if (clock_id != CLOCK_REALTIME)
136 		return (EINVAL);
137 
138 	if ((error = copyin(SCARG(uap, tp), &ats, sizeof(ats))) != 0)
139 		return (error);
140 
141 	TIMESPEC_TO_TIMEVAL(&atv,&ats);
142 
143 	/*
144 	 * If the system is secure, we do not allow the time to be
145 	 * set to an earlier value (it may be slowed using adjtime,
146 	 * but not set back). This feature prevent interlopers from
147 	 * setting arbitrary time stamps on files.
148 	 */
149 	if (securelevel > 1 && timercmp(&atv, &time, <))
150 		return (EPERM);
151 	settime(&atv);
152 
153 	return (0);
154 }
155 
156 int
157 sys_clock_getres(p, v, retval)
158 	struct proc *p;
159 	void *v;
160 	register_t *retval;
161 {
162 	register struct sys_clock_getres_args /* {
163 		syscallarg(clockid_t) clock_id;
164 		syscallarg(struct timespec *) tp;
165 	} */ *uap = v;
166 	clockid_t clock_id;
167 	struct timespec ts;
168 	int error = 0;
169 
170 	clock_id = SCARG(uap, clock_id);
171 	if (clock_id != CLOCK_REALTIME)
172 		return (EINVAL);
173 
174 	if (SCARG(uap, tp)) {
175 		ts.tv_sec = 0;
176 		ts.tv_nsec = 1000000000 / hz;
177 
178 		error = copyout(&ts, SCARG(uap, tp), sizeof (ts));
179 	}
180 
181 	return error;
182 }
183 
184 /* ARGSUSED */
185 int
186 sys_nanosleep(p, v, retval)
187 	struct proc *p;
188 	void *v;
189 	register_t *retval;
190 {
191 	static int nanowait;
192 	register struct sys_nanosleep_args/* {
193 		syscallarg(const struct timespec *) rqtp;
194 		syscallarg(struct timespec *) rmtp;
195 	} */ *uap = v;
196 	struct timespec rqt;
197 	struct timespec rmt;
198 	struct timeval atv, utv;
199 	int error, s, timo;
200 
201 	error = copyin((const void *)SCARG(uap, rqtp), (void *)&rqt,
202 	    sizeof(struct timespec));
203 	if (error)
204 		return (error);
205 
206 	TIMESPEC_TO_TIMEVAL(&atv,&rqt)
207 	if (itimerfix(&atv))
208 		return (EINVAL);
209 
210 	s = splclock();
211 	timeradd(&atv,&time,&atv);
212 	timo = hzto(&atv);
213 	splx(s);
214 	/*
215 	 * Avoid inadvertantly sleeping forever
216 	 */
217 	if (timo <= 0)
218 		timo = 1;
219 
220 	error = tsleep(&nanowait, PWAIT | PCATCH, "nanosleep", timo);
221 	if (error == ERESTART)
222 		error = EINTR;
223 	if (error == EWOULDBLOCK)
224 		error = 0;
225 
226 	if (SCARG(uap, rmtp)) {
227 		int error;
228 
229 		s = splclock();
230 		utv = time;
231 		splx(s);
232 
233 		timersub(&atv, &utv, &utv);
234 		if (utv.tv_sec < 0)
235 			timerclear(&utv);
236 
237 		TIMEVAL_TO_TIMESPEC(&utv, &rmt);
238 		error = copyout((void *)&rmt, (void *)SCARG(uap,rmtp),
239 		    sizeof(rmt));
240 		if (error)
241 			return (error);
242 	}
243 
244 	return error;
245 }
246 
247 /* ARGSUSED */
248 int
249 sys_gettimeofday(p, v, retval)
250 	struct proc *p;
251 	void *v;
252 	register_t *retval;
253 {
254 	register struct sys_gettimeofday_args /* {
255 		syscallarg(struct timeval *) tp;
256 		syscallarg(struct timezone *) tzp;
257 	} */ *uap = v;
258 	struct timeval atv;
259 	int error = 0;
260 
261 	if (SCARG(uap, tp)) {
262 		microtime(&atv);
263 		if ((error = copyout((void *)&atv, (void *)SCARG(uap, tp),
264 		    sizeof (atv))))
265 			return (error);
266 	}
267 	if (SCARG(uap, tzp))
268 		error = copyout((void *)&tz, (void *)SCARG(uap, tzp),
269 		    sizeof (tz));
270 	return (error);
271 }
272 
/*
 * settimeofday(2): set the wall clock and/or the kernel timezone.
 * Superuser only.  All user arguments are validated and copied in
 * before any state is changed.
 */
/* ARGSUSED */
int
sys_settimeofday(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct sys_settimeofday_args /* {
		syscallarg(struct timeval *) tv;
		syscallarg(struct timezone *) tzp;
	} */ *uap = v;
	struct timeval atv;
	struct timezone atz;
	int error;

	/* Setting the time or timezone is a privileged operation. */
	if ((error = suser(p->p_ucred, &p->p_acflag)))
		return (error);
	/* Verify all parameters before changing time. */
	if (SCARG(uap, tv) && (error = copyin((void *)SCARG(uap, tv),
	    (void *)&atv, sizeof(atv))))
		return (error);
	if (SCARG(uap, tzp) && (error = copyin((void *)SCARG(uap, tzp),
	    (void *)&atz, sizeof(atz))))
		return (error);
	if (SCARG(uap, tv)) {
		/*
		 * Don't allow the time to be set forward so far it will wrap
		 * and become negative, thus allowing an attacker to bypass
		 * the next check below.  The cutoff is 1 year before rollover
		 * occurs, so even if the attacker uses adjtime(2) to move
		 * the time past the cutoff, it will take a very long time
		 * to get to the wrap point.
		 *
		 * XXX: we check against INT_MAX since on 64-bit
		 *	platforms, sizeof(int) != sizeof(long) and
		 *	time_t is 32 bits even when atv.tv_sec is 64 bits.
		 */
		if (atv.tv_sec > INT_MAX - 365*24*60*60) {
			printf("denied attempt to set clock forward to %ld\n",
			    atv.tv_sec);
			return (EPERM);
		}
		/*
		 * If the system is secure, we do not allow the time to be
		 * set to an earlier value (it may be slowed using adjtime,
		 * but not set back). This feature prevent interlopers from
		 * setting arbitrary time stamps on files.
		 */
		if (securelevel > 1 && timercmp(&atv, &time, <)) {
			printf("denied attempt to set clock back %ld seconds\n",
			    time.tv_sec - atv.tv_sec);
			return (EPERM);
		}
		settime(&atv);
	}
	/* Timezone is updated last, after the clock checks succeed. */
	if (SCARG(uap, tzp))
		tz = atz;
	return (0);
}
332 
333 int	tickdelta;			/* current clock skew, us. per tick */
334 long	timedelta;			/* unapplied time correction, us. */
335 long	bigadj = 1000000;		/* use 10x skew above bigadj us. */
336 
337 /* ARGSUSED */
338 int
339 sys_adjtime(p, v, retval)
340 	struct proc *p;
341 	void *v;
342 	register_t *retval;
343 {
344 	register struct sys_adjtime_args /* {
345 		syscallarg(struct timeval *) delta;
346 		syscallarg(struct timeval *) olddelta;
347 	} */ *uap = v;
348 	struct timeval atv;
349 	register long ndelta, ntickdelta, odelta;
350 	int s, error;
351 
352 	if ((error = suser(p->p_ucred, &p->p_acflag)))
353 		return (error);
354 	if ((error = copyin((void *)SCARG(uap, delta), (void *)&atv,
355 	    sizeof(struct timeval))))
356 		return (error);
357 
358 	/*
359 	 * Compute the total correction and the rate at which to apply it.
360 	 * Round the adjustment down to a whole multiple of the per-tick
361 	 * delta, so that after some number of incremental changes in
362 	 * hardclock(), tickdelta will become zero, lest the correction
363 	 * overshoot and start taking us away from the desired final time.
364 	 */
365 	ndelta = atv.tv_sec * 1000000 + atv.tv_usec;
366 	if (ndelta > bigadj)
367 		ntickdelta = 10 * tickadj;
368 	else
369 		ntickdelta = tickadj;
370 	if (ndelta % ntickdelta)
371 		ndelta = ndelta / ntickdelta * ntickdelta;
372 
373 	/*
374 	 * To make hardclock()'s job easier, make the per-tick delta negative
375 	 * if we want time to run slower; then hardclock can simply compute
376 	 * tick + tickdelta, and subtract tickdelta from timedelta.
377 	 */
378 	if (ndelta < 0)
379 		ntickdelta = -ntickdelta;
380 	s = splclock();
381 	odelta = timedelta;
382 	timedelta = ndelta;
383 	tickdelta = ntickdelta;
384 	splx(s);
385 
386 	if (SCARG(uap, olddelta)) {
387 		atv.tv_sec = odelta / 1000000;
388 		atv.tv_usec = odelta % 1000000;
389 		if ((error = copyout((void *)&atv, (void *)SCARG(uap, olddelta),
390 		    sizeof(struct timeval))))
391 			return (error);
392 	}
393 	return (0);
394 }
395 
396 /*
397  * Get value of an interval timer.  The process virtual and
398  * profiling virtual time timers are kept in the p_stats area, since
399  * they can be swapped out.  These are kept internally in the
400  * way they are specified externally: in time until they expire.
401  *
402  * The real time interval timer is kept in the process table slot
403  * for the process, and its value (it_value) is kept as an
404  * absolute time rather than as a delta, so that it is easy to keep
405  * periodic real-time signals from drifting.
406  *
407  * Virtual time timers are processed in the hardclock() routine of
408  * kern_clock.c.  The real time timer is processed by a timeout
409  * routine, called from the softclock() routine.  Since a callout
410  * may be delayed in real time due to interrupt processing in the system,
411  * it is possible for the real time timeout routine (realitexpire, given below),
412  * to be delayed in real time past when it is supposed to occur.  It
413  * does not suffice, therefore, to reload the real timer .it_value from the
414  * real time timers .it_interval.  Rather, we compute the next time in
415  * absolute time the timer should go off.
416  */
417 /* ARGSUSED */
418 int
419 sys_getitimer(p, v, retval)
420 	struct proc *p;
421 	void *v;
422 	register_t *retval;
423 {
424 	register struct sys_getitimer_args /* {
425 		syscallarg(u_int) which;
426 		syscallarg(struct itimerval *) itv;
427 	} */ *uap = v;
428 	struct itimerval aitv;
429 	int s;
430 
431 	if (SCARG(uap, which) > ITIMER_PROF)
432 		return (EINVAL);
433 	s = splclock();
434 	if (SCARG(uap, which) == ITIMER_REAL) {
435 		/*
436 		 * Convert from absolute to relative time in .it_value
437 		 * part of real time timer.  If time for real time timer
438 		 * has passed return 0, else return difference between
439 		 * current time and time for the timer to go off.
440 		 */
441 		aitv = p->p_realtimer;
442 		if (timerisset(&aitv.it_value)) {
443 			if (timercmp(&aitv.it_value, &time, <))
444 				timerclear(&aitv.it_value);
445 			else
446 				timersub(&aitv.it_value, &time,
447 				    &aitv.it_value);
448 		}
449 	} else
450 		aitv = p->p_stats->p_timer[SCARG(uap, which)];
451 	splx(s);
452 	return (copyout((void *)&aitv, (void *)SCARG(uap, itv),
453 	    sizeof (struct itimerval)));
454 }
455 
/*
 * setitimer(2): install a new interval timer value, optionally
 * returning the previous one.  The real timer is converted to an
 * absolute expiry time and driven by a timeout; the virtual and
 * profiling timers are decremented in hardclock().
 */
/* ARGSUSED */
int
sys_setitimer(p, v, retval)
	struct proc *p;
	register void *v;
	register_t *retval;
{
	register struct sys_setitimer_args /* {
		syscallarg(u_int) which;
		syscallarg(struct itimerval *) itv;
		syscallarg(struct itimerval *) oitv;
	} */ *uap = v;
	struct itimerval aitv;
	register const struct itimerval *itvp;
	int s, error;
	int timo;

	if (SCARG(uap, which) > ITIMER_PROF)
		return (EINVAL);
	itvp = SCARG(uap, itv);
	if (itvp && (error = copyin((void *)itvp, (void *)&aitv,
	    sizeof(struct itimerval))))
		return (error);
	/*
	 * If the old value was requested, overwrite the itv slot of the
	 * argument structure with oitv and reuse sys_getitimer() to copy
	 * it out.  itvp still points at the user's new value.
	 */
	if ((SCARG(uap, itv) = SCARG(uap, oitv)) &&
	    (error = sys_getitimer(p, uap, retval)))
		return (error);
	/* No new value supplied: only the old value was wanted. */
	if (itvp == 0)
		return (0);
	if (itimerfix(&aitv.it_value) || itimerfix(&aitv.it_interval))
		return (EINVAL);
	s = splclock();
	if (SCARG(uap, which) == ITIMER_REAL) {
		/* Cancel any pending expiry, then rearm if value is set. */
		timeout_del(&p->p_realit_to);
		if (timerisset(&aitv.it_value)) {
			/* Store the value as an absolute expiry time. */
			timeradd(&aitv.it_value, &time, &aitv.it_value);
			timo = hzto(&aitv.it_value);
			/* Guard against a zero/negative timeout. */
			if (timo <= 0)
				timo = 1;
			timeout_add(&p->p_realit_to, timo);
		}
		p->p_realtimer = aitv;
	} else {
		/* Intervals finer than a tick are rounded up. */
		itimerround(&aitv.it_interval);
		p->p_stats->p_timer[SCARG(uap, which)] = aitv;
	}
	splx(s);
	return (0);
}
504 
505 /*
506  * Real interval timer expired:
507  * send process whose timer expired an alarm signal.
508  * If time is not set up to reload, then just return.
509  * Else compute next time timer should go off which is > current time.
510  * This is where delay in processing this timeout causes multiple
511  * SIGALRM calls to be compressed into one.
512  */
void
realitexpire(arg)
	void *arg;	/* the struct proc whose real timer fired */
{
	register struct proc *p;
	int s, timo;

	p = (struct proc *)arg;
	psignal(p, SIGALRM);
	/* One-shot timer: clear the absolute expiry time and stop. */
	if (!timerisset(&p->p_realtimer.it_interval)) {
		timerclear(&p->p_realtimer.it_value);
		return;
	}
	/*
	 * Periodic timer: advance the absolute expiry time by whole
	 * intervals until it lies in the future, so that a late callout
	 * compresses any missed expirations into this single SIGALRM.
	 */
	for (;;) {
		s = splclock();
		timeradd(&p->p_realtimer.it_value,
		    &p->p_realtimer.it_interval, &p->p_realtimer.it_value);
		if (timercmp(&p->p_realtimer.it_value, &time, >)) {
			timo = hzto(&p->p_realtimer.it_value);
			/* Guard against a zero/negative timeout. */
			if (timo <= 0)
				timo = 1;
			timeout_add(&p->p_realit_to, timo);
			splx(s);
			return;
		}
		splx(s);
	}
}
541 
542 /*
543  * Check that a proposed value to load into the .it_value or
544  * .it_interval part of an interval timer is acceptable.
545  */
int
itimerfix(tv)
	struct timeval *tv;
{
	/* Microseconds must be a normalized, non-negative fraction. */
	if (tv->tv_usec < 0 || tv->tv_usec >= 1000000)
		return (EINVAL);
	/* Seconds must be non-negative and not absurdly large. */
	if (tv->tv_sec < 0 || tv->tv_sec > 100000000)
		return (EINVAL);

	return (0);
}
557 
558 /*
559  * Timer interval smaller than the resolution of the system clock are
560  * rounded up.
561  */
562 void
563 itimerround(tv)
564 	struct timeval *tv;
565 {
566 	if (tv->tv_sec == 0 && tv->tv_usec < tick)
567 		tv->tv_usec = tick;
568 }
569 
570 /*
571  * Decrement an interval timer by a specified number
572  * of microseconds, which must be less than a second,
573  * i.e. < 1000000.  If the timer expires, then reload
574  * it.  In this case, carry over (usec - old value) to
575  * reduce the value reloaded into the timer so that
576  * the timer does not drift.  This routine assumes
577  * that it is called in a context where the timers
578  * on which it is operating cannot change in value.
579  */
580 int
581 itimerdecr(itp, usec)
582 	register struct itimerval *itp;
583 	int usec;
584 {
585 
586 	if (itp->it_value.tv_usec < usec) {
587 		if (itp->it_value.tv_sec == 0) {
588 			/* expired, and already in next interval */
589 			usec -= itp->it_value.tv_usec;
590 			goto expire;
591 		}
592 		itp->it_value.tv_usec += 1000000;
593 		itp->it_value.tv_sec--;
594 	}
595 	itp->it_value.tv_usec -= usec;
596 	usec = 0;
597 	if (timerisset(&itp->it_value))
598 		return (1);
599 	/* expired, exactly at end of interval */
600 expire:
601 	if (timerisset(&itp->it_interval)) {
602 		itp->it_value = itp->it_interval;
603 		itp->it_value.tv_usec -= usec;
604 		if (itp->it_value.tv_usec < 0) {
605 			itp->it_value.tv_usec += 1000000;
606 			itp->it_value.tv_sec--;
607 		}
608 	} else
609 		itp->it_value.tv_usec = 0;		/* sec is already 0 */
610 	return (0);
611 }
612 
613 /*
614  * ratecheck(): simple time-based rate-limit checking.  see ratecheck(9)
615  * for usage and rationale.
616  */
617 int
618 ratecheck(lasttime, mininterval)
619 	struct timeval *lasttime;
620 	const struct timeval *mininterval;
621 {
622 	struct timeval tv, delta;
623 	int s, rv = 0;
624 
625 	s = splclock();
626 	tv = mono_time;
627 	splx(s);
628 
629 	timersub(&tv, lasttime, &delta);
630 
631 	/*
632 	 * check for 0,0 is so that the message will be seen at least once,
633 	 * even if interval is huge.
634 	 */
635 	if (timercmp(&delta, mininterval, >=) ||
636 	    (lasttime->tv_sec == 0 && lasttime->tv_usec == 0)) {
637 		*lasttime = tv;
638 		rv = 1;
639 	}
640 
641 	return (rv);
642 }
643 
644 /*
645  * ppsratecheck(): packets (or events) per second limitation.
646  */
647 int
648 ppsratecheck(lasttime, curpps, maxpps)
649 	struct timeval *lasttime;
650 	int *curpps;
651 	int maxpps;	/* maximum pps allowed */
652 {
653 	struct timeval tv, delta;
654 	int s, rv;
655 
656 	s = splclock();
657 	tv = mono_time;
658 	splx(s);
659 
660 	timersub(&tv, lasttime, &delta);
661 
662 	/*
663 	 * check for 0,0 is so that the message will be seen at least once.
664 	 * if more than one second have passed since the last update of
665 	 * lasttime, reset the counter.
666 	 *
667 	 * we do increment *curpps even in *curpps < maxpps case, as some may
668 	 * try to use *curpps for stat purposes as well.
669 	 */
670 	if ((lasttime->tv_sec == 0 && lasttime->tv_usec == 0) ||
671 	    delta.tv_sec >= 1) {
672 		*lasttime = tv;
673 		*curpps = 0;
674 		rv = 1;
675 	} else if (maxpps < 0)
676 		rv = 1;
677 	else if (*curpps < maxpps)
678 		rv = 1;
679 	else
680 		rv = 0;
681 
682 #if 1 /*DIAGNOSTIC?*/
683 	/* be careful about wrap-around */
684 	if (*curpps + 1 > *curpps)
685 		*curpps = *curpps + 1;
686 #else
687 	/*
688 	 * assume that there's not too many calls to this function.
689 	 * not sure if the assumption holds, as it depends on *caller's*
690 	 * behavior, not the behavior of this function.
691 	 * IMHO it is wrong to make assumption on the caller's behavior,
692 	 * so the above #if is #if 1, not #ifdef DIAGNOSTIC.
693 	 */
694 	*curpps = *curpps + 1;
695 #endif
696 
697 	return (rv);
698 }
699