/*	$KAME: altq_subr.c,v 1.23 2004/04/20 16:10:06 itojun Exp $	*/
/*	$DragonFly: src/sys/net/altq/altq_subr.c,v 1.5 2005/06/03 18:20:36 swildner Exp $ */

/*
 * Copyright (C) 1997-2003
 *	Sony Computer Science Laboratories Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_altq.h"
#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/kernel.h>
#include <sys/callout.h>
#include <sys/errno.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/thread2.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/ifq_var.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#ifdef INET6
#include <netinet/ip6.h>
#endif
#include <netinet/tcp.h>
#include <netinet/udp.h>

#include <net/pf/pfvar.h>
#include <net/altq/altq.h>

/* machine dependent clock related includes */
#if defined(__i386__)
#include <machine/clock.h>		/* for tsc_freq */
#include <machine/md_var.h>		/* for cpu_feature */
#include <machine/specialreg.h>		/* for CPUID_TSC */
#endif /* __i386__ */

/*
 * internal function prototypes
 */
static void	tbr_timeout(void *);
int (*altq_input)(struct mbuf *, int) = NULL;
static int tbr_timer = 0;	/* token bucket regulator timer */
static struct callout tbr_callout;

int pfaltq_running;	/* keep track of running state */

MALLOC_DEFINE(M_ALTQ, "altq", "ALTQ structures");

/*
 * alternate queueing support routines
 */

/* look up the queue state by the interface name and the queueing type. */
void *
altq_lookup(const char *name, int type)
{
	struct ifnet *ifp;

	if ((ifp = ifunit(name)) != NULL) {
		if (type != ALTQT_NONE && ifp->if_snd.altq_type == type)
			return (ifp->if_snd.altq_disc);
	}

	return (NULL);
}

int
altq_attach(struct ifaltq *ifq, int type, void *discipline,
	    int (*enqueue)(struct ifaltq *, struct mbuf *, struct altq_pktattr *),
	    struct mbuf *(*dequeue)(struct ifaltq *, int),
	    int (*request)(struct ifaltq *, int, void *),
	    void *clfier,
	    void *(*classify)(struct ifaltq *, struct mbuf *,
			      struct altq_pktattr *))
{
	if (!ifq_is_ready(ifq))
		return ENXIO;

	ifq->altq_type     = type;
	ifq->altq_disc     = discipline;
	ifq->altq_enqueue  = enqueue;
	ifq->altq_dequeue  = dequeue;
	ifq->altq_request  = request;
	ifq->altq_clfier   = clfier;
	ifq->altq_classify = classify;
	ifq->altq_flags &= (ALTQF_CANTCHANGE|ALTQF_ENABLED);
	return 0;
}
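
/*
 * A discipline attaches itself with altq_attach() from its pf attach
 * hook, passing its own handlers; e.g. the CBQ discipline does roughly
 * (sketch, cf. cbq_pfattach()):
 *
 *	altq_attach(&ifp->if_snd, ALTQT_CBQ, cbqp,
 *	    cbq_enqueue, cbq_dequeue, cbq_request, NULL, NULL);
 */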

int
altq_detach(struct ifaltq *ifq)
{
	if (!ifq_is_ready(ifq))
		return ENXIO;
	if (ifq_is_enabled(ifq))
		return EBUSY;
	if (!ifq_is_attached(ifq))
		return (0);

	ifq_set_classic(ifq);
	ifq->altq_type     = ALTQT_NONE;
	ifq->altq_disc     = NULL;
	ifq->altq_clfier   = NULL;
	ifq->altq_classify = NULL;
	ifq->altq_flags &= ALTQF_CANTCHANGE;
	return 0;
}

int
altq_enable(struct ifaltq *ifq)
{
	if (!ifq_is_ready(ifq))
		return ENXIO;
	if (ifq_is_enabled(ifq))
		return 0;

	crit_enter();
	ifq_purge(ifq);
	KKASSERT(ifq->ifq_len == 0);
	ifq->altq_flags |= ALTQF_ENABLED;
	if (ifq->altq_clfier != NULL)
		ifq->altq_flags |= ALTQF_CLASSIFY;
	crit_exit();

	return 0;
}

int
altq_disable(struct ifaltq *ifq)
{
	if (!ifq_is_enabled(ifq))
		return 0;

	crit_enter();
	ifq_purge(ifq);
	KKASSERT(ifq->ifq_len == 0);
	ifq->altq_flags &= ~(ALTQF_ENABLED|ALTQF_CLASSIFY);
	crit_exit();
	return 0;
}

/*
 * internal representation of token bucket parameters
 *	rate:	byte_per_unittime << 32
 *		(((bits_per_sec) / 8) << 32) / machclk_freq
 *	depth:	byte << 32
 */
#define	TBR_SHIFT	32
#define	TBR_SCALE(x)	((int64_t)(x) << TBR_SHIFT)
#define	TBR_UNSCALE(x)	((x) >> TBR_SHIFT)
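
/*
 * Worked example: for a 10Mbps profile on a 1GHz machine clock,
 * bytes_per_sec = 10000000 / 8 = 1250000, so
 * tbr_rate = TBR_SCALE(1250000) / 1000000000 = 5368709, i.e. about
 * 0.00125 bytes per machine clock tick in this fixed point form;
 * a 12000 byte bucket is simply TBR_SCALE(12000).
 */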

struct mbuf *
tbr_dequeue(struct ifaltq *ifq, int op)
{
	struct tb_regulator *tbr;
	struct mbuf *m;
	int64_t interval;
	uint64_t now;

	crit_enter();
	tbr = ifq->altq_tbr;
	if (op == ALTDQ_REMOVE && tbr->tbr_lastop == ALTDQ_POLL) {
		/* if this is a remove after poll, bypass tbr check */
	} else {
		/* update token only when it is negative */
		if (tbr->tbr_token <= 0) {
			now = read_machclk();
			interval = now - tbr->tbr_last;
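			/*
			 * interval is in machine clock ticks and tbr_rate
			 * is scaled bytes per tick, so the product below
			 * is the scaled byte credit earned since tbr_last.
			 */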
			if (interval >= tbr->tbr_filluptime)
				tbr->tbr_token = tbr->tbr_depth;
			else {
				tbr->tbr_token += interval * tbr->tbr_rate;
				if (tbr->tbr_token > tbr->tbr_depth)
					tbr->tbr_token = tbr->tbr_depth;
			}
			tbr->tbr_last = now;
		}
		/* if token is still negative, don't allow dequeue */
		if (tbr->tbr_token <= 0) {
			crit_exit();
			return (NULL);
		}
	}

	if (ifq_is_enabled(ifq))
		m = (*ifq->altq_dequeue)(ifq, op);
	else if (op == ALTDQ_POLL)
		IF_POLL(ifq, m);
	else
		IF_DEQUEUE(ifq, m);

	if (m != NULL && op == ALTDQ_REMOVE)
		tbr->tbr_token -= TBR_SCALE(m_pktlen(m));
	tbr->tbr_lastop = op;
	crit_exit();
	return (m);
}

/*
 * set a token bucket regulator.
 * if the specified rate is zero, the token bucket regulator is deleted.
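 * rate is in bits per second and depth in bytes; e.g. a profile of
 * { rate = 10000000, depth = 12000 } shapes to 10Mbps with a 12KB bucket.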
 */
int
tbr_set(struct ifaltq *ifq, struct tb_profile *profile)
{
	struct tb_regulator *tbr, *otbr;

	if (machclk_freq == 0)
		init_machclk();
	if (machclk_freq == 0) {
		printf("tbr_set: no cpu clock available!\n");
		return (ENXIO);
	}

	if (profile->rate == 0) {
		/* delete this tbr */
		if ((tbr = ifq->altq_tbr) == NULL)
			return (ENOENT);
		ifq->altq_tbr = NULL;
		free(tbr, M_ALTQ);
		return (0);
	}

	tbr = malloc(sizeof(*tbr), M_ALTQ, M_WAITOK | M_ZERO);
	tbr->tbr_rate = TBR_SCALE(profile->rate / 8) / machclk_freq;
	tbr->tbr_depth = TBR_SCALE(profile->depth);
	if (tbr->tbr_rate > 0)
		tbr->tbr_filluptime = tbr->tbr_depth / tbr->tbr_rate;
	else
		tbr->tbr_filluptime = 0xffffffffffffffffLL;
	tbr->tbr_token = tbr->tbr_depth;
	tbr->tbr_last = read_machclk();
	tbr->tbr_lastop = ALTDQ_REMOVE;

	otbr = ifq->altq_tbr;
	ifq->altq_tbr = tbr;	/* set the new tbr */

	if (otbr != NULL)
		free(otbr, M_ALTQ);
	else if (tbr_timer == 0) {
		callout_reset(&tbr_callout, 1, tbr_timeout, NULL);
		tbr_timer = 1;
	}
	return (0);
}

/*
 * tbr_timeout goes through the interface list, and kicks the drivers
 * if necessary.
 */
static void
tbr_timeout(void *arg)
{
	struct ifnet *ifp;
	int active;

	active = 0;
	crit_enter();
	for (ifp = TAILQ_FIRST(&ifnet); ifp; ifp = TAILQ_NEXT(ifp, if_list)) {
		if (ifp->if_snd.altq_tbr == NULL)
			continue;
		active++;
		if (!ifq_is_empty(&ifp->if_snd) && ifp->if_start != NULL)
			(*ifp->if_start)(ifp);
	}
	crit_exit();
	if (active > 0)
		callout_reset(&tbr_callout, 1, tbr_timeout, NULL);
	else
		tbr_timer = 0;	/* don't need tbr_timer anymore */
}

/*
 * get token bucket regulator profile
 */
int
tbr_get(struct ifaltq *ifq, struct tb_profile *profile)
{
	struct tb_regulator *tbr;

	if ((tbr = ifq->altq_tbr) == NULL) {
		profile->rate = 0;
		profile->depth = 0;
	} else {
		profile->rate =
		    (u_int)TBR_UNSCALE(tbr->tbr_rate * 8 * machclk_freq);
		profile->depth = (u_int)TBR_UNSCALE(tbr->tbr_depth);
	}
	return (0);
}

/*
 * attach a discipline to the interface.  if one already exists, it is
 * overridden.
 */
int
altq_pfattach(struct pf_altq *a)
{
	struct ifnet *ifp;
	struct tb_profile tb;
	int error = 0;

	switch (a->scheduler) {
	case ALTQT_NONE:
		break;
#ifdef ALTQ_CBQ
	case ALTQT_CBQ:
		error = cbq_pfattach(a);
		break;
#endif
#ifdef ALTQ_PRIQ
	case ALTQT_PRIQ:
		error = priq_pfattach(a);
		break;
#endif
#ifdef ALTQ_HFSC
	case ALTQT_HFSC:
		error = hfsc_pfattach(a);
		break;
#endif
	default:
		error = ENXIO;
	}

	ifp = ifunit(a->ifname);

	/* if the state is running, enable altq */
	if (error == 0 && pfaltq_running &&
	    ifp != NULL && ifp->if_snd.altq_type != ALTQT_NONE &&
	    !ifq_is_enabled(&ifp->if_snd))
		error = altq_enable(&ifp->if_snd);

	/* if altq is already enabled, reset the token bucket regulator */
	if (error == 0 && ifp != NULL && ifq_is_enabled(&ifp->if_snd)) {
		tb.rate = a->ifbandwidth;
		tb.depth = a->tbrsize;
		crit_enter();
		error = tbr_set(&ifp->if_snd, &tb);
		crit_exit();
	}

	return (error);
}

/*
 * detach a discipline from the interface.
 * it is possible that the discipline was already overridden by another
 * discipline.
 */
int
altq_pfdetach(struct pf_altq *a)
{
	struct ifnet *ifp;
	int error = 0;

	if ((ifp = ifunit(a->ifname)) == NULL)
		return (EINVAL);

	/* if this discipline is no longer referenced, just return */
	if (a->altq_disc == NULL || a->altq_disc != ifp->if_snd.altq_disc)
		return (0);

	crit_enter();
	if (ifq_is_enabled(&ifp->if_snd))
		error = altq_disable(&ifp->if_snd);
	if (error == 0)
		error = altq_detach(&ifp->if_snd);
	crit_exit();

	return (error);
}

/*
 * add a discipline or a queue
 */
int
altq_add(struct pf_altq *a)
{
	int error = 0;

	if (a->qname[0] != 0)
		return (altq_add_queue(a));

	if (machclk_freq == 0)
		init_machclk();
	if (machclk_freq == 0)
		panic("altq_add: no cpu clock");

	switch (a->scheduler) {
#ifdef ALTQ_CBQ
	case ALTQT_CBQ:
		error = cbq_add_altq(a);
		break;
#endif
#ifdef ALTQ_PRIQ
	case ALTQT_PRIQ:
		error = priq_add_altq(a);
		break;
#endif
#ifdef ALTQ_HFSC
	case ALTQT_HFSC:
		error = hfsc_add_altq(a);
		break;
#endif
	default:
		error = ENXIO;
	}

	return (error);
}

/*
 * remove a discipline or a queue
 */
int
altq_remove(struct pf_altq *a)
{
	int error = 0;

	if (a->qname[0] != 0)
		return (altq_remove_queue(a));

	switch (a->scheduler) {
#ifdef ALTQ_CBQ
	case ALTQT_CBQ:
		error = cbq_remove_altq(a);
		break;
#endif
#ifdef ALTQ_PRIQ
	case ALTQT_PRIQ:
		error = priq_remove_altq(a);
		break;
#endif
#ifdef ALTQ_HFSC
	case ALTQT_HFSC:
		error = hfsc_remove_altq(a);
		break;
#endif
	default:
		error = ENXIO;
	}

	return (error);
}

/*
 * add a queue to the discipline
 */
int
altq_add_queue(struct pf_altq *a)
{
	int error = 0;

	switch (a->scheduler) {
#ifdef ALTQ_CBQ
	case ALTQT_CBQ:
		error = cbq_add_queue(a);
		break;
#endif
#ifdef ALTQ_PRIQ
	case ALTQT_PRIQ:
		error = priq_add_queue(a);
		break;
#endif
#ifdef ALTQ_HFSC
	case ALTQT_HFSC:
		error = hfsc_add_queue(a);
		break;
#endif
	default:
		error = ENXIO;
	}

	return (error);
}

/*
 * remove a queue from the discipline
 */
int
altq_remove_queue(struct pf_altq *a)
{
	int error = 0;

	switch (a->scheduler) {
#ifdef ALTQ_CBQ
	case ALTQT_CBQ:
		error = cbq_remove_queue(a);
		break;
#endif
#ifdef ALTQ_PRIQ
	case ALTQT_PRIQ:
		error = priq_remove_queue(a);
		break;
#endif
#ifdef ALTQ_HFSC
	case ALTQT_HFSC:
		error = hfsc_remove_queue(a);
		break;
#endif
	default:
		error = ENXIO;
	}

	return (error);
}

/*
 * get queue statistics
 */
int
altq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
{
	int error = 0;

	switch (a->scheduler) {
#ifdef ALTQ_CBQ
	case ALTQT_CBQ:
		error = cbq_getqstats(a, ubuf, nbytes);
		break;
#endif
#ifdef ALTQ_PRIQ
	case ALTQT_PRIQ:
		error = priq_getqstats(a, ubuf, nbytes);
		break;
#endif
#ifdef ALTQ_HFSC
	case ALTQT_HFSC:
		error = hfsc_getqstats(a, ubuf, nbytes);
		break;
#endif
	default:
		error = ENXIO;
	}

	return (error);
}

/*
 * read and write diffserv field in IPv4 or IPv6 header
 */
uint8_t
read_dsfield(struct mbuf *m, struct altq_pktattr *pktattr)
{
	struct mbuf *m0;
	uint8_t ds_field = 0;

	if (pktattr == NULL ||
	    (pktattr->pattr_af != AF_INET && pktattr->pattr_af != AF_INET6))
		return ((uint8_t)0);

	/* verify that pattr_hdr is within the mbuf data */
	for (m0 = m; m0 != NULL; m0 = m0->m_next) {
		if ((pktattr->pattr_hdr >= m0->m_data) &&
		    (pktattr->pattr_hdr < m0->m_data + m0->m_len))
			break;
	}
	if (m0 == NULL) {
		/* ick, pattr_hdr is stale */
		pktattr->pattr_af = AF_UNSPEC;
#ifdef ALTQ_DEBUG
		printf("read_dsfield: can't locate header!\n");
#endif
		return ((uint8_t)0);
	}

	if (pktattr->pattr_af == AF_INET) {
		struct ip *ip = (struct ip *)pktattr->pattr_hdr;

		if (ip->ip_v != 4)
			return ((uint8_t)0);	/* version mismatch! */
		ds_field = ip->ip_tos;
	}
#ifdef INET6
	else if (pktattr->pattr_af == AF_INET6) {
		struct ip6_hdr *ip6 = (struct ip6_hdr *)pktattr->pattr_hdr;
		uint32_t flowlabel;

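		/*
		 * ip6_flow holds the version (top 4 bits), the traffic
		 * class (next 8 bits) and the 20 bit flow label, so after
		 * ntohl() the DS field sits in bits 20-27.
		 */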
		flowlabel = ntohl(ip6->ip6_flow);
		if ((flowlabel >> 28) != 6)
			return ((uint8_t)0);	/* version mismatch! */
		ds_field = (flowlabel >> 20) & 0xff;
	}
#endif
	return (ds_field);
}

void
write_dsfield(struct mbuf *m, struct altq_pktattr *pktattr, uint8_t dsfield)
{
	struct mbuf *m0;

	if (pktattr == NULL ||
	    (pktattr->pattr_af != AF_INET && pktattr->pattr_af != AF_INET6))
		return;

	/* verify that pattr_hdr is within the mbuf data */
	for (m0 = m; m0 != NULL; m0 = m0->m_next) {
		if ((pktattr->pattr_hdr >= m0->m_data) &&
		    (pktattr->pattr_hdr < m0->m_data + m0->m_len))
			break;
	}
	if (m0 == NULL) {
		/* ick, pattr_hdr is stale */
		pktattr->pattr_af = AF_UNSPEC;
#ifdef ALTQ_DEBUG
		printf("write_dsfield: can't locate header!\n");
#endif
		return;
	}

	if (pktattr->pattr_af == AF_INET) {
		struct ip *ip = (struct ip *)pktattr->pattr_hdr;
		uint8_t old;
		int32_t sum;

		if (ip->ip_v != 4)
			return;		/* version mismatch! */
		old = ip->ip_tos;
		dsfield |= old & 3;	/* leave CU bits */
		if (old == dsfield)
			return;
		ip->ip_tos = dsfield;
		/*
		 * update checksum (from RFC1624)
		 *	   HC' = ~(~HC + ~m + m')
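		 * m is the 16 bit word containing ip_v/ip_hl and ip_tos;
		 * only the low (TOS) byte changes, so ~m + m' reduces to
		 * 0xff00 + (~old & 0xff) + dsfield below.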
		 */
		sum = ~ntohs(ip->ip_sum) & 0xffff;
		sum += 0xff00 + (~old & 0xff) + dsfield;
		sum = (sum >> 16) + (sum & 0xffff);
		sum += (sum >> 16);  /* add carry */

		ip->ip_sum = htons(~sum & 0xffff);
	}
#ifdef INET6
	else if (pktattr->pattr_af == AF_INET6) {
		struct ip6_hdr *ip6 = (struct ip6_hdr *)pktattr->pattr_hdr;
		uint32_t flowlabel;

		flowlabel = ntohl(ip6->ip6_flow);
		if ((flowlabel >> 28) != 6)
			return;		/* version mismatch! */
		flowlabel = (flowlabel & 0xf03fffff) | (dsfield << 20);
		ip6->ip6_flow = htonl(flowlabel);
	}
#endif
}

/*
 * high resolution clock support taking advantage of a machine dependent
 * high resolution time counter (e.g., timestamp counter of intel pentium).
 * we assume
 *  - 64-bit-long monotonically-increasing counter
 *  - frequency range is 100M-4GHz (CPU speed)
 */
/* if pcc is not available or disabled, emulate 256MHz using microtime() */
#define	MACHCLK_SHIFT	8
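/*
 * 1000000 << MACHCLK_SHIFT = 256000000: each microsecond reported by
 * microtime() counts as 256 ticks of the emulated clock.
 */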

int machclk_usepcc;
uint32_t machclk_freq = 0;
uint32_t machclk_per_tick = 0;

void
init_machclk(void)
{
	callout_init(&tbr_callout);

	machclk_usepcc = 1;

#if !defined(__i386__) || defined(ALTQ_NOPCC)
	machclk_usepcc = 0;
#elif defined(__DragonFly__) && defined(SMP)
	machclk_usepcc = 0;
#elif defined(__i386__)
	/* check if TSC is available */
	if (machclk_usepcc == 1 && (cpu_feature & CPUID_TSC) == 0)
		machclk_usepcc = 0;
#endif

	if (machclk_usepcc == 0) {
		/* emulate 256MHz using microtime() */
		machclk_freq = 1000000 << MACHCLK_SHIFT;
		machclk_per_tick = machclk_freq / hz;
#ifdef ALTQ_DEBUG
		printf("altq: emulate %uHz cpu clock\n", machclk_freq);
#endif
		return;
	}

	/*
	 * if the clock frequency (of Pentium TSC or Alpha PCC) is
	 * accessible, just use it.
	 */
#ifdef __i386__
	machclk_freq = tsc_freq;
#else
#error "machclk_freq interface not implemented"
#endif

	/*
	 * if we don't know the clock frequency, measure it.
	 */
	if (machclk_freq == 0) {
		static int	wait;
		struct timeval	tv_start, tv_end;
		uint64_t	start, end, diff;
		int		timo;

		microtime(&tv_start);
		start = read_machclk();
		timo = hz;	/* 1 sec */
		tsleep(&wait, PCATCH, "init_machclk", timo);
		microtime(&tv_end);
		end = read_machclk();
		diff = (uint64_t)(tv_end.tv_sec - tv_start.tv_sec) * 1000000
		    + tv_end.tv_usec - tv_start.tv_usec;
		if (diff != 0)
			machclk_freq = (u_int)((end - start) * 1000000 / diff);
	}

	machclk_per_tick = machclk_freq / hz;

#ifdef ALTQ_DEBUG
	printf("altq: CPU clock: %uHz\n", machclk_freq);
#endif
}

uint64_t
read_machclk(void)
{
	uint64_t val;

	if (machclk_usepcc) {
#if defined(__i386__)
		val = rdtsc();
#else
		panic("read_machclk");
#endif
	} else {
		struct timeval tv;

		microtime(&tv);
		val = (((uint64_t)(tv.tv_sec - boottime.tv_sec) * 1000000
		    + tv.tv_usec) << MACHCLK_SHIFT);
	}
	return (val);
}