xref: /illumos-gate/usr/src/uts/i86pc/os/mp_machdep.c (revision 7257d1b4)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 #define	PSMI_1_6
29 #include <sys/smp_impldefs.h>
30 #include <sys/psm.h>
31 #include <sys/psm_modctl.h>
32 #include <sys/pit.h>
33 #include <sys/cmn_err.h>
34 #include <sys/strlog.h>
35 #include <sys/clock.h>
36 #include <sys/debug.h>
37 #include <sys/rtc.h>
38 #include <sys/x86_archext.h>
39 #include <sys/cpupart.h>
40 #include <sys/cpuvar.h>
41 #include <sys/cmt.h>
42 #include <sys/cpu.h>
43 #include <sys/disp.h>
44 #include <sys/archsystm.h>
45 #include <sys/machsystm.h>
46 #include <sys/sysmacros.h>
47 #include <sys/memlist.h>
48 #include <sys/param.h>
49 #include <sys/promif.h>
50 #if defined(__xpv)
51 #include <sys/hypervisor.h>
52 #endif
53 #include <sys/mach_intr.h>
54 #include <vm/hat_i86.h>
55 #include <sys/kdi_machimpl.h>
56 #include <sys/sdt.h>
57 
58 #define	OFFSETOF(s, m)		(size_t)(&(((s *)0)->m))
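
/*
 * OFFSETOF() is equivalent in effect to the standard offsetof();
 * mach_get_platform() below uses it to size the truncated psm_ops
 * vectors published by older PSMI revisions.
 */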
59 
60 /*
61  *	Local function prototypes
62  */
63 static int mp_disable_intr(processorid_t cpun);
64 static void mp_enable_intr(processorid_t cpun);
65 static void mach_init();
66 static void mach_picinit();
67 static int machhztomhz(uint64_t cpu_freq_hz);
68 static uint64_t mach_getcpufreq(void);
69 static void mach_fixcpufreq(void);
70 static int mach_clkinit(int, int *);
71 static void mach_smpinit(void);
72 static int mach_softlvl_to_vect(int ipl);
73 static void mach_get_platform(int owner);
74 static void mach_construct_info();
75 static int mach_translate_irq(dev_info_t *dip, int irqno);
76 static int mach_intr_ops(dev_info_t *, ddi_intr_handle_impl_t *,
77     psm_intr_op_t, int *);
78 static void mach_notify_error(int level, char *errmsg);
79 static hrtime_t dummy_hrtime(void);
80 static void dummy_scalehrtime(hrtime_t *);
81 static void cpu_idle(void);
82 static void cpu_wakeup(cpu_t *, int);
83 #ifndef __xpv
84 static void cpu_idle_mwait(void);
85 static void cpu_wakeup_mwait(cpu_t *, int);
86 #endif
87 /*
88  *	External reference functions
89  */
90 extern void return_instr();
91 extern uint64_t freq_tsc(uint32_t *);
92 #if defined(__i386)
93 extern uint64_t freq_notsc(uint32_t *);
94 #endif
95 extern void pc_gethrestime(timestruc_t *);
96 extern int cpuid_get_coreid(cpu_t *);
97 extern int cpuid_get_chipid(cpu_t *);
98 
99 /*
100  *	PSM functions initialization
101  */
102 void (*psm_shutdownf)(int, int)	= (void (*)(int, int))return_instr;
103 void (*psm_preshutdownf)(int, int) = (void (*)(int, int))return_instr;
104 void (*psm_notifyf)(int)	= (void (*)(int))return_instr;
105 void (*psm_set_idle_cpuf)(int)	= (void (*)(int))return_instr;
106 void (*psm_unset_idle_cpuf)(int) = (void (*)(int))return_instr;
107 void (*psminitf)()		= mach_init;
108 void (*picinitf)() 		= return_instr;
109 int (*clkinitf)(int, int *) 	= (int (*)(int, int *))return_instr;
110 int (*ap_mlsetup)() 		= (int (*)(void))return_instr;
111 void (*send_dirintf)() 		= return_instr;
112 void (*setspl)(int)		= (void (*)(int))return_instr;
113 int (*addspl)(int, int, int, int) = (int (*)(int, int, int, int))return_instr;
114 int (*delspl)(int, int, int, int) = (int (*)(int, int, int, int))return_instr;
115 void (*kdisetsoftint)(int, struct av_softinfo *)=
116 	(void (*)(int, struct av_softinfo *))return_instr;
117 void (*setsoftint)(int, struct av_softinfo *)=
118 	(void (*)(int, struct av_softinfo *))return_instr;
119 int (*slvltovect)(int)		= (int (*)(int))return_instr;
120 int (*setlvl)(int, int *)	= (int (*)(int, int *))return_instr;
121 void (*setlvlx)(int, int)	= (void (*)(int, int))return_instr;
122 int (*psm_disable_intr)(int)	= mp_disable_intr;
123 void (*psm_enable_intr)(int)	= mp_enable_intr;
124 hrtime_t (*gethrtimef)(void)	= dummy_hrtime;
125 hrtime_t (*gethrtimeunscaledf)(void)	= dummy_hrtime;
126 void (*scalehrtimef)(hrtime_t *)	= dummy_scalehrtime;
127 int (*psm_translate_irq)(dev_info_t *, int) = mach_translate_irq;
128 void (*gethrestimef)(timestruc_t *) = pc_gethrestime;
129 void (*psm_notify_error)(int, char *) = (void (*)(int, char *))NULL;
130 int (*psm_get_clockirq)(int) = NULL;
131 int (*psm_get_ipivect)(int, int) = NULL;
132 
133 int (*psm_clkinit)(int) = NULL;
134 void (*psm_timer_reprogram)(hrtime_t) = NULL;
135 void (*psm_timer_enable)(void) = NULL;
136 void (*psm_timer_disable)(void) = NULL;
137 void (*psm_post_cyclic_setup)(void *arg) = NULL;
138 int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *, psm_intr_op_t,
139     int *) = mach_intr_ops;
140 int (*psm_state)(psm_state_request_t *) = (int (*)(psm_state_request_t *))
141     return_instr;
142 
143 void (*notify_error)(int, char *) = (void (*)(int, char *))return_instr;
144 void (*hrtime_tick)(void)	= return_instr;
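
/*
 * Most of the vectors above default to return_instr, which simply
 * returns, so they may be called safely before any PSM module has
 * installed its ops; mach_init() and mach_smpinit() repoint them
 * once a PSM is chosen.
 */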
145 
146 /*
147  * True if the generic TSC code is our source of hrtime, rather than whatever
148  * the PSM can provide.
149  */
150 #ifdef __xpv
151 int tsc_gethrtime_enable = 0;
152 #else
153 int tsc_gethrtime_enable = 1;
154 #endif
155 int tsc_gethrtime_initted = 0;
156 
157 /*
158  * True if the hrtime implementation is "hires"; namely, better than microsecond resolution.
159  */
160 int gethrtime_hires = 0;
161 
162 /*
163  * Local Static Data
164  */
165 static struct psm_ops mach_ops;
166 static struct psm_ops *mach_set[4] = {&mach_ops, NULL, NULL, NULL};
167 static ushort_t mach_ver[4] = {0, 0, 0, 0};
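
/*
 * mach_set[0] accumulates the ops the system will actually use; the
 * remaining slots are indexed by PSM ownership class (PSM_OWN_SYS_DEFAULT,
 * PSM_OWN_EXCLUSIVE, PSM_OWN_OVERRIDE) as they are registered by
 * mach_construct_info().
 */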
168 
169 /*
170  * If non-zero, idle cpus will become "halted" when there's
171  * no work to do.
172  */
173 int	idle_cpu_use_hlt = 1;
174 
175 #ifndef __xpv
176 /*
177  * If non-zero, idle cpus will use mwait, if available, to halt instead of hlt.
178  */
179 int	idle_cpu_prefer_mwait = 1;
180 #endif
181 
182 /*ARGSUSED*/
183 int
184 pg_plat_hw_shared(cpu_t *cp, pghw_type_t hw)
185 {
186 	switch (hw) {
187 	case PGHW_IPIPE:
188 		if (x86_feature & (X86_HTT)) {
189 			/*
190 			 * Hyper-threading is SMT
191 			 */
192 			return (1);
193 		} else {
194 			return (0);
195 		}
196 	case PGHW_CHIP:
197 		if (x86_feature & (X86_CMP|X86_HTT))
198 			return (1);
199 		else
200 			return (0);
201 	case PGHW_CACHE:
202 		if (cpuid_get_ncpu_sharing_last_cache(cp) > 1)
203 			return (1);
204 		else
205 			return (0);
206 	default:
207 		return (0);
208 	}
209 }
210 
211 /*
212  * Compare two CPUs and see if they have a pghw_type_t sharing relationship.
213  * If pghw_type_t is an unsupported hardware type, return -1.
214  */
215 int
216 pg_plat_cpus_share(cpu_t *cpu_a, cpu_t *cpu_b, pghw_type_t hw)
217 {
218 	id_t pgp_a, pgp_b;
219 
220 	pgp_a = pg_plat_hw_instance_id(cpu_a, hw);
221 	pgp_b = pg_plat_hw_instance_id(cpu_b, hw);
222 
223 	if (pgp_a == -1 || pgp_b == -1)
224 		return (-1);
225 
226 	return (pgp_a == pgp_b);
227 }
228 
229 /*
230  * Return a physical instance identifier for known hardware sharing
231  * relationships
232  */
233 id_t
234 pg_plat_hw_instance_id(cpu_t *cpu, pghw_type_t hw)
235 {
236 	switch (hw) {
237 	case PGHW_IPIPE:
238 		return (cpuid_get_coreid(cpu));
239 	case PGHW_CACHE:
240 		return (cpuid_get_last_lvl_cacheid(cpu));
241 	case PGHW_CHIP:
242 		return (cpuid_get_chipid(cpu));
243 	default:
244 		return (-1);
245 	}
246 }
247 
248 int
249 pg_plat_hw_level(pghw_type_t hw)
250 {
251 	int i;
252 	static pghw_type_t hw_hier[] = {
253 		PGHW_IPIPE,
254 		PGHW_CACHE,
255 		PGHW_CHIP,
256 		PGHW_NUM_COMPONENTS
257 	};
258 
259 	for (i = 0; hw_hier[i] != PGHW_NUM_COMPONENTS; i++) {
260 		if (hw_hier[i] == hw)
261 			return (i);
262 	}
263 	return (-1);
264 }
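
/*
 * The index into hw_hier above doubles as the level, so PGHW_IPIPE is
 * the lowest (most closely shared) level and PGHW_CHIP the highest.
 */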
265 
266 /*
267  * Return 1 if CMT load balancing policies should be
268  * implemented across instances of the specified hardware
269  * sharing relationship.
270  */
271 int
272 pg_plat_cmt_load_bal_hw(pghw_type_t hw)
273 {
274 	if (hw == PGHW_IPIPE ||
275 	    hw == PGHW_FPU ||
276 	    hw == PGHW_CHIP ||
277 	    hw == PGHW_CACHE)
278 		return (1);
279 	else
280 		return (0);
281 }
282 
283 
284 /*
285  * Return 1 if thread affinity policies should be implemented
286  * for instances of the specified hardware sharing relationship.
287  */
288 int
289 pg_plat_cmt_affinity_hw(pghw_type_t hw)
290 {
291 	if (hw == PGHW_CACHE)
292 		return (1);
293 	else
294 		return (0);
295 }
296 
297 id_t
298 pg_plat_get_core_id(cpu_t *cpu)
299 {
300 	return ((id_t)cpuid_get_coreid(cpu));
301 }
302 
303 void
304 cmp_set_nosteal_interval(void)
305 {
306 	/* Set the nosteal interval (used by disp_getbest()) to 100us */
307 	nosteal_nsec = 100000UL;
308 }
309 
310 /*
311  * Routine to ensure that initial callers of gethrtime() get 0 as the return value
312  */
313 static hrtime_t
314 dummy_hrtime(void)
315 {
316 	return (0);
317 }
318 
319 /* ARGSUSED */
320 static void
321 dummy_scalehrtime(hrtime_t *ticks)
322 {}
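
/*
 * The dummies above are placeholders only; mach_clkinit() repoints
 * gethrtimef and friends at either the generic TSC code or the PSM's
 * psm_gethrtime op once the clock is initialized.
 */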
323 
324 /*
325  * Idle the present CPU until awoken via an interrupt
326  */
327 static void
328 cpu_idle(void)
329 {
330 	cpu_t		*cpup = CPU;
331 	processorid_t	cpun = cpup->cpu_id;
332 	cpupart_t	*cp = cpup->cpu_part;
333 	int		hset_update = 1;
334 
335 	/*
336 	 * If this CPU is online and there are multiple CPUs
337 	 * in the system, then we should notate our halting
338 	 * by adding ourselves to the partition's halted CPU
339 	 * bitmap. This allows other CPUs to find/awaken us when
340 	 * work becomes available.
341 	 */
342 	if (cpup->cpu_flags & CPU_OFFLINE || ncpus == 1)
343 		hset_update = 0;
344 
345 	/*
346 	 * Add ourselves to the partition's halted CPUs bitmask
347 	 * and set our HALTED flag, if necessary.
348 	 *
349 	 * When a thread becomes runnable, it is placed on the queue
350 	 * and then the halted cpuset is checked to determine who
351 	 * (if anyone) should be awoken. We therefore need to first
352 	 * add ourselves to the halted cpuset, and then check if there
353 	 * is any work available.
354 	 *
355 	 * Note that memory barriers after updating the HALTED flag
356 	 * are not necessary since an atomic operation (updating the bitmap)
357 	 * immediately follows. On x86 the atomic operation acts as a
358 	 * memory barrier for the update of cpu_disp_flags.
359 	 */
360 	if (hset_update) {
361 		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
362 		CPUSET_ATOMIC_ADD(cp->cp_mach->mc_haltset, cpun);
363 	}
364 
365 	/*
366 	 * Check to make sure there's really nothing to do.
367 	 * Work destined for this CPU may become available after
368 	 * this check. We'll be notified through the clearing of our
369 	 * bit in the halted CPU bitmask, and a poke.
370 	 */
371 	if (disp_anywork()) {
372 		if (hset_update) {
373 			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
374 			CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
375 		}
376 		return;
377 	}
378 
379 	/*
380 	 * We're on our way to being halted.
381 	 *
382 	 * Disable interrupts now, so that we'll awaken immediately
383 	 * after halting if someone tries to poke us between now and
384 	 * the time we actually halt.
385 	 *
386 	 * We check for the presence of our bit after disabling interrupts.
387 	 * If it's cleared, we'll return. If the bit is cleared after
388 	 * we check, then the poke will pop us out of the halted state.
389 	 *
390 	 * This means that the ordering of the poke and the clearing
391 	 * of the bit by cpu_wakeup is important.
392 	 * cpu_wakeup() must clear, then poke.
393 	 * cpu_idle() must disable interrupts, then check for the bit.
394 	 */
395 	cli();
396 
397 	if (hset_update && !CPU_IN_SET(cp->cp_mach->mc_haltset, cpun)) {
398 		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
399 		sti();
400 		return;
401 	}
402 
403 	/*
404 	 * The check for anything locally runnable is here for performance
405 	 * and isn't needed for correctness. disp_nrunnable ought to be
406 	 * in our cache still, so it's inexpensive to check, and if there
407 	 * is anything runnable we won't have to wait for the poke.
408 	 */
409 	if (cpup->cpu_disp->disp_nrunnable != 0) {
410 		if (hset_update) {
411 			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
412 			CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
413 		}
414 		sti();
415 		return;
416 	}
417 
418 	mach_cpu_idle();
419 
420 	/*
421 	 * We're no longer halted
422 	 */
423 	if (hset_update) {
424 		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
425 		CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
426 	}
427 }
428 
429 
430 /*
431  * If "cpu" is halted, then wake it up clearing its halted bit in advance.
432  * Otherwise, see if other CPUs in the cpu partition are halted and need to
433  * be woken up so that they can steal the thread we placed on this CPU.
434  * This function is only used on MP systems.
435  */
436 static void
437 cpu_wakeup(cpu_t *cpu, int bound)
438 {
439 	uint_t		cpu_found;
440 	int		result;
441 	cpupart_t	*cp;
442 
443 	cp = cpu->cpu_part;
444 	if (CPU_IN_SET(cp->cp_mach->mc_haltset, cpu->cpu_id)) {
445 		/*
446 		 * Clear the halted bit for that CPU since it will be
447 		 * poked in a moment.
448 		 */
449 		CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpu->cpu_id);
450 		/*
451 		 * We may find the current CPU present in the halted cpuset
452 		 * if we're in the context of an interrupt that occurred
453 		 * before we had a chance to clear our bit in cpu_idle().
454 		 * Poking ourselves is obviously unnecessary, since if
455 		 * we're here, we're not halted.
456 		 */
457 		if (cpu != CPU)
458 			poke_cpu(cpu->cpu_id);
459 		return;
460 	} else {
461 		/*
462 		 * This cpu isn't halted, but it's idle or undergoing a
463 		 * context switch. No need to awaken anyone else.
464 		 */
465 		if (cpu->cpu_thread == cpu->cpu_idle_thread ||
466 		    cpu->cpu_disp_flags & CPU_DISP_DONTSTEAL)
467 			return;
468 	}
469 
470 	/*
471 	 * No need to wake up other CPUs if the thread we just enqueued
472 	 * is bound.
473 	 */
474 	if (bound)
475 		return;
476 
477 
478 	/*
479 	 * See if there are any other halted CPUs. If there are, then
480 	 * select one, and awaken it.
481 	 * It's possible that after we find a CPU, somebody else
482 	 * will awaken it before we get the chance.
483 	 * In that case, look again.
484 	 */
485 	do {
486 		CPUSET_FIND(cp->cp_mach->mc_haltset, cpu_found);
487 		if (cpu_found == CPUSET_NOTINSET)
488 			return;
489 
490 		ASSERT(cpu_found >= 0 && cpu_found < NCPU);
491 		CPUSET_ATOMIC_XDEL(cp->cp_mach->mc_haltset, cpu_found, result);
492 	} while (result < 0);
493 
494 	if (cpu_found != CPU->cpu_id)
495 		poke_cpu(cpu_found);
496 }
497 
498 #ifndef __xpv
499 /*
500  * Idle the present CPU until awoken via touching its monitored line
501  */
502 static void
503 cpu_idle_mwait(void)
504 {
505 	volatile uint32_t	*mcpu_mwait = CPU->cpu_m.mcpu_mwait;
506 	cpu_t			*cpup = CPU;
507 	processorid_t		cpun = cpup->cpu_id;
508 	cpupart_t		*cp = cpup->cpu_part;
509 	int			hset_update = 1;
510 
511 	/*
512 	 * Set our mcpu_mwait here, so we can tell if anyone tries to
513 	 * wake us between now and when we call mwait.  No other cpu will
514 	 * attempt to set our mcpu_mwait until we add ourselves to the haltset.
515 	 */
516 	*mcpu_mwait = MWAIT_HALTED;
517 
518 	/*
519 	 * If this CPU is online and there are multiple CPUs
520 	 * in the system, then we should notate our halting
521 	 * by adding ourselves to the partition's halted CPU
522 	 * bitmap. This allows other CPUs to find/awaken us when
523 	 * work becomes available.
524 	 */
525 	if (cpup->cpu_flags & CPU_OFFLINE || ncpus == 1)
526 		hset_update = 0;
527 
528 	/*
529 	 * Add ourselves to the partition's halted CPUs bitmask
530 	 * and set our HALTED flag, if necessary.
531 	 *
532 	 * When a thread becomes runnable, it is placed on the queue
533 	 * and then the halted cpuset is checked to determine who
534 	 * (if anyone) should be awoken. We therefore need to first
535 	 * add ourselves to the halted cpuset, and then check if there
536 	 * is any work available.
537 	 *
538 	 * Note that memory barriers after updating the HALTED flag
539 	 * are not necessary since an atomic operation (updating the bitmap)
540 	 * immediately follows. On x86 the atomic operation acts as a
541 	 * memory barrier for the update of cpu_disp_flags.
542 	 */
543 	if (hset_update) {
544 		cpup->cpu_disp_flags |= CPU_DISP_HALTED;
545 		CPUSET_ATOMIC_ADD(cp->cp_mach->mc_haltset, cpun);
546 	}
547 
548 	/*
549 	 * Check to make sure there's really nothing to do.
550 	 * Work destined for this CPU may become available after
551 	 * this check. We'll be notified through the clearing of our
552 	 * bit in the halted CPU bitmask, and a write to our mcpu_mwait.
553 	 *
554 	 * disp_anywork() checks disp_nrunnable, so we do not have to later.
555 	 */
556 	if (disp_anywork()) {
557 		if (hset_update) {
558 			cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
559 			CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
560 		}
561 		return;
562 	}
563 
564 	/*
565 	 * We're on our way to being halted.
566 	 * To avoid a lost wakeup, arm the monitor before checking if another
567 	 * cpu wrote to mcpu_mwait to wake us up.
568 	 */
569 	i86_monitor(mcpu_mwait, 0, 0);
570 	if (*mcpu_mwait == MWAIT_HALTED) {
571 		DTRACE_PROBE1(idle__state__transition, uint_t, IDLE_STATE_C1);
572 
573 		tlb_going_idle();
574 		i86_mwait(0, 0);
575 		tlb_service();
576 
577 		DTRACE_PROBE1(idle__state__transition, uint_t, IDLE_STATE_C0);
578 	}
579 
580 	/*
581 	 * We're no longer halted
582 	 */
583 	if (hset_update) {
584 		cpup->cpu_disp_flags &= ~CPU_DISP_HALTED;
585 		CPUSET_ATOMIC_DEL(cp->cp_mach->mc_haltset, cpun);
586 	}
587 }
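
/*
 * In outline, the lost-wakeup-free idle sequence used above is:
 *
 *	*mcpu_mwait = MWAIT_HALTED;		(advertise intent to idle)
 *	...join haltset, check for work...
 *	i86_monitor(mcpu_mwait, 0, 0);		(arm the monitor)
 *	if (*mcpu_mwait == MWAIT_HALTED)	(nobody woke us meanwhile)
 *		i86_mwait(0, 0);		(sleep until line is written)
 *
 * A waker that stores to mcpu_mwait after the monitor is armed
 * terminates the mwait; a store before the re-check is caught by the
 * comparison. Either way the wakeup cannot be lost.
 */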
588 
589 /*
590  * If "cpu" is halted in mwait, then wake it up clearing its halted bit in
591  * advance.  Otherwise, see if other CPUs in the cpu partition are halted and
592  * need to be woken up so that they can steal the thread we placed on this CPU.
593  * This function is only used on MP systems.
594  */
595 static void
596 cpu_wakeup_mwait(cpu_t *cp, int bound)
597 {
598 	cpupart_t	*cpu_part;
599 	uint_t		cpu_found;
600 	int		result;
601 
602 	cpu_part = cp->cpu_part;
603 
604 	/*
605 	 * If the CPU is halted in mwait, wake it up by writing to its
606 	 * monitored line, clearing its halted bit in advance.
607 	 */
608 	if (CPU_IN_SET(cpu_part->cp_mach->mc_haltset, cp->cpu_id)) {
609 		/*
610 		 * Clear the halted bit for that CPU since it will be
611 		 * poked in a moment.
612 		 */
613 		CPUSET_ATOMIC_DEL(cpu_part->cp_mach->mc_haltset, cp->cpu_id);
614 		/*
615 		 * We may find the current CPU present in the halted cpuset
616 		 * if we're in the context of an interrupt that occurred
617 		 * before we had a chance to clear our bit in cpu_idle().
618 		 * Waking ourself is obviously unnecessary, since if
619 		 * we're here, we're not halted.
620 		 *
621 		 * monitor/mwait wakeup via writing to our cache line is
622 		 * harmless and less expensive than always checking if we
623 		 * are waking ourself which is an uncommon case.
624 		 */
625 		MWAIT_WAKEUP(cp);	/* write to monitored line */
626 		return;
627 	} else {
628 		/*
629 		 * This cpu isn't halted, but it's idle or undergoing a
630 		 * context switch. No need to awaken anyone else.
631 		 */
632 		if (cp->cpu_thread == cp->cpu_idle_thread ||
633 		    cp->cpu_disp_flags & CPU_DISP_DONTSTEAL)
634 			return;
635 	}
636 
637 	/*
638 	 * No need to wake up other CPUs if the thread we just enqueued
639 	 * is bound.
640 	 */
641 	if (bound)
642 		return;
643 
644 
645 	/*
646 	 * See if there are any other halted CPUs. If there are, then
647 	 * select one, and awaken it.
648 	 * It's possible that after we find a CPU, somebody else
649 	 * will awaken it before we get the chance.
650 	 * In that case, look again.
651 	 */
652 	do {
653 		CPUSET_FIND(cpu_part->cp_mach->mc_haltset, cpu_found);
654 		if (cpu_found == CPUSET_NOTINSET)
655 			return;
656 
657 		ASSERT(cpu_found >= 0 && cpu_found < NCPU);
658 		CPUSET_ATOMIC_XDEL(cpu_part->cp_mach->mc_haltset, cpu_found,
659 		    result);
660 	} while (result < 0);
661 
662 	/*
663 	 * Do not check whether cpu_found is ourselves, as a monitor/mwait
664 	 * wakeup is cheap.
665 	 */
666 	MWAIT_WAKEUP(cpu[cpu_found]);	/* write to monitored line */
667 }
668 #endif
669 
670 void (*cpu_pause_handler)(volatile char *) = NULL;
671 
672 static int
673 mp_disable_intr(int cpun)
674 {
675 	/*
676 	 * switch to the offline cpu
677 	 */
678 	affinity_set(cpun);
679 	/*
680 	 * raise ipl to just below cross call
681 	 */
682 	splx(XC_MED_PIL-1);
683 	/*
684 	 *	set base spl to prevent the next swtch() to idle from
685 	 *	lowering back to ipl 0
686 	 */
687 	CPU->cpu_intr_actv |= (1 << (XC_MED_PIL-1));
688 	set_base_spl();
689 	affinity_clear();
690 	return (DDI_SUCCESS);
691 }
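
/*
 * Note that mp_disable_intr() does not mask interrupts in hardware; it
 * raises the target CPU's base SPL to XC_MED_PIL-1, so that only cross
 * calls and higher-priority interrupts continue to be serviced there.
 */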
692 
693 static void
694 mp_enable_intr(int cpun)
695 {
696 	/*
697 	 * switch to the online cpu
698 	 */
699 	affinity_set(cpun);
700 	/*
701 	 * clear the interrupt active mask
702 	 */
703 	CPU->cpu_intr_actv &= ~(1 << (XC_MED_PIL-1));
704 	set_base_spl();
705 	(void) spl0();
706 	affinity_clear();
707 }
708 
709 static void
710 mach_get_platform(int owner)
711 {
712 	void		**srv_opsp;
713 	void		**clt_opsp;
714 	int		i;
715 	int		total_ops;
716 
717 	/* fix up psm ops */
718 	srv_opsp = (void **)mach_set[0];
719 	clt_opsp = (void **)mach_set[owner];
720 	if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01)
721 		total_ops = sizeof (struct psm_ops_ver01) /
722 		    sizeof (void (*)(void));
723 	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_1)
724 		/* no psm_notify_func */
725 		total_ops = OFFSETOF(struct psm_ops, psm_notify_func) /
726 		    sizeof (void (*)(void));
727 	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_2)
728 		/* no psm_timer funcs */
729 		total_ops = OFFSETOF(struct psm_ops, psm_timer_reprogram) /
730 		    sizeof (void (*)(void));
731 	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_3)
732 		/* no psm_preshutdown function */
733 		total_ops = OFFSETOF(struct psm_ops, psm_preshutdown) /
734 		    sizeof (void (*)(void));
735 	else if (mach_ver[owner] == (ushort_t)PSM_INFO_VER01_4)
736 		/* no psm_intr_ops function */
737 		total_ops = OFFSETOF(struct psm_ops, psm_intr_ops) /
738 		    sizeof (void (*)(void));
739 	else
740 		total_ops = sizeof (struct psm_ops) / sizeof (void (*)(void));
741 
742 	/*
743 	 * Save the version of the PSM module, in case we need to
744 	 * behave differently based on version.
745 	 */
746 	mach_ver[0] = mach_ver[owner];
747 
748 	for (i = 0; i < total_ops; i++)
749 		if (clt_opsp[i] != NULL)
750 			srv_opsp[i] = clt_opsp[i];
751 }
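
/*
 * For example, a module built against PSM_INFO_VER01_2 has a psm_ops
 * that ends just before psm_timer_reprogram, so total_ops is clamped to
 * OFFSETOF(struct psm_ops, psm_timer_reprogram) function pointers and
 * the copy loop above never reads past the end of the shorter structure.
 */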
752 
753 static void
754 mach_construct_info()
755 {
756 	struct psm_sw *swp;
757 	int	mach_cnt[PSM_OWN_OVERRIDE+1] = {0};
758 	int	conflict_owner = 0;
759 
760 	if (psmsw->psw_forw == psmsw)
761 		panic("No valid PSM modules found");
762 	mutex_enter(&psmsw_lock);
763 	for (swp = psmsw->psw_forw; swp != psmsw; swp = swp->psw_forw) {
764 		if (!(swp->psw_flag & PSM_MOD_IDENTIFY))
765 			continue;
766 		mach_set[swp->psw_infop->p_owner] = swp->psw_infop->p_ops;
767 		mach_ver[swp->psw_infop->p_owner] = swp->psw_infop->p_version;
768 		mach_cnt[swp->psw_infop->p_owner]++;
769 	}
770 	mutex_exit(&psmsw_lock);
771 
772 	mach_get_platform(PSM_OWN_SYS_DEFAULT);
773 
774 	/* check to see if there are any conflicts */
775 	if (mach_cnt[PSM_OWN_EXCLUSIVE] > 1)
776 		conflict_owner = PSM_OWN_EXCLUSIVE;
777 	if (mach_cnt[PSM_OWN_OVERRIDE] > 1)
778 		conflict_owner = PSM_OWN_OVERRIDE;
779 	if (conflict_owner) {
780 		/* conflicts found: warn and stay with the default (uppc) PSM */
781 		cmn_err(CE_WARN,
782 		    "Conflicts detected on the following PSM modules:");
783 		mutex_enter(&psmsw_lock);
784 		for (swp = psmsw->psw_forw; swp != psmsw; swp = swp->psw_forw) {
785 			if (swp->psw_infop->p_owner == conflict_owner)
786 				cmn_err(CE_WARN, "%s ",
787 				    swp->psw_infop->p_mach_idstring);
788 		}
789 		mutex_exit(&psmsw_lock);
790 		cmn_err(CE_WARN,
791 		    "Setting the system back to SINGLE processor mode!");
792 		cmn_err(CE_WARN,
793 		    "Please edit /etc/mach to remove the invalid PSM module.");
794 		return;
795 	}
796 
797 	if (mach_set[PSM_OWN_EXCLUSIVE])
798 		mach_get_platform(PSM_OWN_EXCLUSIVE);
799 
800 	if (mach_set[PSM_OWN_OVERRIDE])
801 		mach_get_platform(PSM_OWN_OVERRIDE);
802 }
803 
804 static void
805 mach_init()
806 {
807 	struct psm_ops  *pops;
808 
809 	mach_construct_info();
810 
811 	pops = mach_set[0];
812 
813 	/* register the interrupt and clock initialization routines */
814 	picinitf = mach_picinit;
815 	clkinitf = mach_clkinit;
816 	psm_get_clockirq = pops->psm_get_clockirq;
817 
818 	/* register the interrupt setup code */
819 	slvltovect = mach_softlvl_to_vect;
820 	addspl	= pops->psm_addspl;
821 	delspl	= pops->psm_delspl;
822 
823 	if (pops->psm_translate_irq)
824 		psm_translate_irq = pops->psm_translate_irq;
825 	if (pops->psm_intr_ops)
826 		psm_intr_ops = pops->psm_intr_ops;
827 
828 #if defined(PSMI_1_2) || defined(PSMI_1_3) || defined(PSMI_1_4)
829 	/*
830 	 * Time-of-day functionality now handled in TOD modules.
831 	 * (Warn about PSM modules that think that we're going to use
832 	 * their ops vectors.)
833 	 */
834 	if (pops->psm_tod_get)
835 		cmn_err(CE_WARN, "obsolete psm_tod_get op %p",
836 		    (void *)pops->psm_tod_get);
837 
838 	if (pops->psm_tod_set)
839 		cmn_err(CE_WARN, "obsolete psm_tod_set op %p",
840 		    (void *)pops->psm_tod_set);
841 #endif
842 
843 	if (pops->psm_notify_error) {
844 		psm_notify_error = mach_notify_error;
845 		notify_error = pops->psm_notify_error;
846 	}
847 
848 	(*pops->psm_softinit)();
849 
850 	/*
851 	 * Initialize the dispatcher's function hooks
852 	 * to enable CPU halting when idle.
853 	 * Do not use monitor/mwait if idle_cpu_use_hlt is not set (spin idle)
854 	 * or idle_cpu_prefer_mwait is not set.
855 	 * Allocate monitor/mwait buffer for cpu0.
856 	 */
857 	if (idle_cpu_use_hlt) {
858 		idle_cpu = cpu_idle;
859 #ifndef __xpv
860 		if ((x86_feature & X86_MWAIT) && idle_cpu_prefer_mwait) {
861 			CPU->cpu_m.mcpu_mwait = cpuid_mwait_alloc(CPU);
862 			/*
863 			 * Protect ourselves from an insane mwait size.
864 			 */
865 			if (CPU->cpu_m.mcpu_mwait == NULL) {
866 #ifdef DEBUG
867 				cmn_err(CE_NOTE, "Using hlt idle.  Cannot "
868 				    "handle cpu 0 mwait size.");
869 #endif
870 				idle_cpu_prefer_mwait = 0;
871 				idle_cpu = cpu_idle;
872 			} else {
873 				idle_cpu = cpu_idle_mwait;
874 			}
875 		} else {
876 			idle_cpu = cpu_idle;
877 		}
878 #endif
879 	}
880 
881 	mach_smpinit();
882 }
883 
884 static void
885 mach_smpinit(void)
886 {
887 	struct psm_ops  *pops;
888 	processorid_t cpu_id;
889 	int cnt;
890 	cpuset_t cpumask;
891 
892 	pops = mach_set[0];
893 	CPUSET_ZERO(cpumask);
894 
895 	cpu_id = -1;
896 	cpu_id = (*pops->psm_get_next_processorid)(cpu_id);
897 	for (cnt = 0; cpu_id != -1; cnt++) {
898 		CPUSET_ADD(cpumask, cpu_id);
899 		cpu_id = (*pops->psm_get_next_processorid)(cpu_id);
900 	}
901 
902 	mp_cpus = cpumask;
903 
904 	/* MP related routines */
905 	ap_mlsetup = pops->psm_post_cpu_start;
906 	send_dirintf = pops->psm_send_ipi;
907 
908 	/* optional MP related routines */
909 	if (pops->psm_shutdown)
910 		psm_shutdownf = pops->psm_shutdown;
911 	if (pops->psm_preshutdown)
912 		psm_preshutdownf = pops->psm_preshutdown;
913 	if (pops->psm_notify_func)
914 		psm_notifyf = pops->psm_notify_func;
915 	if (pops->psm_set_idlecpu)
916 		psm_set_idle_cpuf = pops->psm_set_idlecpu;
917 	if (pops->psm_unset_idlecpu)
918 		psm_unset_idle_cpuf = pops->psm_unset_idlecpu;
919 
920 	psm_clkinit = pops->psm_clkinit;
921 
922 	if (pops->psm_timer_reprogram)
923 		psm_timer_reprogram = pops->psm_timer_reprogram;
924 
925 	if (pops->psm_timer_enable)
926 		psm_timer_enable = pops->psm_timer_enable;
927 
928 	if (pops->psm_timer_disable)
929 		psm_timer_disable = pops->psm_timer_disable;
930 
931 	if (pops->psm_post_cyclic_setup)
932 		psm_post_cyclic_setup = pops->psm_post_cyclic_setup;
933 
934 	if (pops->psm_state)
935 		psm_state = pops->psm_state;
936 
937 	/* check for multiple cpus */
938 	if (cnt < 2)
939 		return;
940 
941 	/* check for MP platforms */
942 	if (pops->psm_cpu_start == NULL)
943 		return;
944 
945 	/*
946 	 * Set the dispatcher hook to enable cpu "wake up"
947 	 * when a thread becomes runnable.
948 	 */
949 	if (idle_cpu_use_hlt) {
950 		disp_enq_thread = cpu_wakeup;
951 #ifndef __xpv
952 		if ((x86_feature & X86_MWAIT) && idle_cpu_prefer_mwait)
953 			disp_enq_thread = cpu_wakeup_mwait;
954 #endif
955 	}
956 
957 	if (pops->psm_disable_intr)
958 		psm_disable_intr = pops->psm_disable_intr;
959 	if (pops->psm_enable_intr)
960 		psm_enable_intr  = pops->psm_enable_intr;
961 
962 	psm_get_ipivect = pops->psm_get_ipivect;
963 
964 	(void) add_avintr((void *)NULL, XC_HI_PIL, xc_serv, "xc_hi_intr",
965 	    (*pops->psm_get_ipivect)(XC_HI_PIL, PSM_INTR_IPI_HI),
966 	    (caddr_t)X_CALL_HIPRI, NULL, NULL, NULL);
967 	(void) add_avintr((void *)NULL, XC_MED_PIL, xc_serv, "xc_med_intr",
968 	    (*pops->psm_get_ipivect)(XC_MED_PIL, PSM_INTR_IPI_LO),
969 	    (caddr_t)X_CALL_MEDPRI, NULL, NULL, NULL);
970 
971 	(void) (*pops->psm_get_ipivect)(XC_CPUPOKE_PIL, PSM_INTR_POKE);
972 }
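
/*
 * The add_avintr() calls above install xc_serv() as the cross-call
 * handler at both XC_HI_PIL and XC_MED_PIL; the final psm_get_ipivect
 * call merely reserves the XC_CPUPOKE_PIL vector used by poke_cpu(),
 * whose interrupt exists only to kick the target CPU out of halt.
 */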
973 
974 static void
975 mach_picinit()
976 {
977 	struct psm_ops  *pops;
978 
979 	pops = mach_set[0];
980 
981 	/* register the interrupt handlers */
982 	setlvl = pops->psm_intr_enter;
983 	setlvlx = pops->psm_intr_exit;
984 
985 	/* initialize the interrupt hardware */
986 	(*pops->psm_picinit)();
987 
988 	/* set interrupt mask for current ipl */
989 	setspl = pops->psm_setspl;
990 	cli();
991 	setspl(CPU->cpu_pri);
992 }
993 
994 uint_t	cpu_freq;	/* MHz */
995 uint64_t cpu_freq_hz;	/* measured (in hertz) */
996 
997 #define	MEGA_HZ		1000000
998 
999 #ifdef __xpv
1000 
1001 int xpv_cpufreq_workaround = 1;
1002 int xpv_cpufreq_verbose = 0;
1003 
1004 #else	/* __xpv */
1005 
1006 static uint64_t
1007 mach_calchz(uint32_t pit_counter, uint64_t *processor_clks)
1008 {
1009 	uint64_t cpu_hz;
1010 
1011 	if ((pit_counter == 0) || (*processor_clks == 0) ||
1012 	    (*processor_clks > (((uint64_t)-1) / PIT_HZ)))
1013 		return (0);
1014 
1015 	cpu_hz = ((uint64_t)PIT_HZ * *processor_clks) / pit_counter;
1016 
1017 	return (cpu_hz);
1018 }
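
/*
 * Worked example (illustrative numbers): if the processor clock
 * advanced by 150,000,000 cycles while the 1,193,182 Hz PIT counted
 * 59,659 ticks, then
 *
 *	cpu_hz = 1193182 * 150000000 / 59659 ~= 3.0 GHz
 *
 * The guard above rejects clock counts large enough to overflow the
 * 64-bit multiplication by PIT_HZ.
 */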
1019 
1020 #endif	/* __xpv */
1021 
1022 static uint64_t
1023 mach_getcpufreq(void)
1024 {
1025 #if defined(__xpv)
1026 	vcpu_time_info_t *vti = &CPU->cpu_m.mcpu_vcpu_info->time;
1027 	uint64_t cpu_hz;
1028 
1029 	/*
1030 	 * During dom0 bringup, it was noted that on at least one older
1031 	 * Intel HT machine, the hypervisor initially gives a tsc_to_system_mul
1032 	 * value that is quite wrong (the 3.06GHz clock was reported
1033  * as 4.77GHz).
1034 	 *
1035  * The curious thing is that if you stop the kernel at entry,
1036 	 * breakpoint here and inspect the value with kmdb, the value
1037 	 * is correct - but if you don't stop and simply enable the
1038 	 * printf statement (below), you can see the bad value printed
1039 	 * here.  Almost as if something kmdb did caused the hypervisor to
1040 	 * figure it out correctly.  And, note that the hypervisor
1041 	 * eventually -does- figure it out correctly ... if you look at
1042 	 * the field later in the life of dom0, it is correct.
1043 	 *
1044 	 * For now, on dom0, we employ a slightly cheesy workaround of
1045 	 * using the DOM0_PHYSINFO hypercall.
1046 	 */
1047 	if (DOMAIN_IS_INITDOMAIN(xen_info) && xpv_cpufreq_workaround) {
1048 		xen_sysctl_t op0, *op = &op0;
1049 
1050 		op->cmd = XEN_SYSCTL_physinfo;
1051 		op->interface_version = XEN_SYSCTL_INTERFACE_VERSION;
1052 		if (HYPERVISOR_sysctl(op) != 0)
1053 			panic("physinfo op refused");
1054 
1055 		cpu_hz = 1000 * (uint64_t)op->u.physinfo.cpu_khz;
1056 	} else {
1057 		cpu_hz = (UINT64_C(1000000000) << 32) / vti->tsc_to_system_mul;
1058 
1059 		if (vti->tsc_shift < 0)
1060 			cpu_hz <<= -vti->tsc_shift;
1061 		else
1062 			cpu_hz >>= vti->tsc_shift;
1063 	}
1064 
1065 	if (xpv_cpufreq_verbose)
1066 		printf("mach_getcpufreq: system_mul 0x%x, shift %d, "
1067 		    "cpu_hz %" PRId64 "Hz\n",
1068 		    vti->tsc_to_system_mul, vti->tsc_shift, cpu_hz);
1069 
1070 	return (cpu_hz);
1071 #else	/* __xpv */
1072 	uint32_t pit_counter;
1073 	uint64_t processor_clks;
1074 
1075 	if (x86_feature & X86_TSC) {
1076 		/*
1077 		 * We have a TSC. freq_tsc() knows how to measure the number
1078 		 * of clock cycles sampled against the PIT.
1079 		 */
1080 		ulong_t flags = clear_int_flag();
1081 		processor_clks = freq_tsc(&pit_counter);
1082 		restore_int_flag(flags);
1083 		return (mach_calchz(pit_counter, &processor_clks));
1084 	} else if (x86_vendor == X86_VENDOR_Cyrix || x86_type == X86_TYPE_P5) {
1085 #if defined(__amd64)
1086 		panic("mach_getcpufreq: no TSC!");
1087 #elif defined(__i386)
1088 		/*
1089 		 * We are a Cyrix based on a 6x86 core or an Intel Pentium
1090 		 * for which freq_notsc() knows how to measure the number of
1091 		 * elapsed clock cycles sampled against the PIT
1092 		 */
1093 		ulong_t flags = clear_int_flag();
1094 		processor_clks = freq_notsc(&pit_counter);
1095 		restore_int_flag(flags);
1096 		return (mach_calchz(pit_counter, &processor_clks));
1097 #endif	/* __i386 */
1098 	}
1099 
1100 	/* We do not know how to calculate cpu frequency for this cpu. */
1101 	return (0);
1102 #endif	/* __xpv */
1103 }
1104 
1105 /*
1106  * If the clock speed of a cpu is found to be reported incorrectly, do not add
1107  * to this array, instead improve the accuracy of the algorithm that determines
1108  * the clock speed of the processor or extend the implementation to support the
1109  * vendor as appropriate. This is here only to support adjusting the speed on
1110  * older slower processors that mach_fixcpufreq() would not be able to account
1111  * for otherwise.
1112  */
1113 static int x86_cpu_freq[] = { 60, 75, 80, 90, 120, 160, 166, 175, 180, 233 };
1114 
1115 /*
1116  * On fast processors the clock frequency that is measured may be off by
1117  * a few MHz from the value printed on the part. This is a combination of
1118  * the factors that for such fast parts being off by this much is within
1119  * the tolerances for manufacture and because of the difficulties in the
1120  * measurement that can lead to small error. This function uses some
1121  * heuristics in order to tweak the value that was measured to match what
1122  * is most likely printed on the part.
1123  *
1124  * Some examples:
1125  * 	AMD Athlon 1000 MHz measured as 998 MHz
1126  * 	Intel Pentium III Xeon 733 MHz measured as 731 MHz
1127  * 	Intel Pentium IV 1500 MHz measured as 1495 MHz
1128  *
1129  * If in the future this function is no longer sufficient to correct
1130  * for the error in the measurement, then the algorithm used to perform
1131  * the measurement will have to be improved in order to increase accuracy
1132  * rather than adding horrible and questionable kludges here.
1133  *
1134  * This is called after the cyclics subsystem because of the potential
1135  * that the heuristics within may give a worse estimate of the clock
1136  * frequency than the value that was measured.
1137  */
1138 static void
1139 mach_fixcpufreq(void)
1140 {
1141 	uint32_t freq, mul, near66, delta66, near50, delta50, fixed, delta, i;
1142 
1143 	freq = (uint32_t)cpu_freq;
1144 
1145 	/*
1146 	 * Find the nearest integer multiple of 200/3 (about 66) MHz to the
1147 	 * measured speed taking into account that the 667 MHz parts were
1148 	 * the first to round-up.
1149 	 */
1150 	mul = (uint32_t)((3 * (uint64_t)freq + 100) / 200);
1151 	near66 = (uint32_t)((200 * (uint64_t)mul + ((mul >= 10) ? 1 : 0)) / 3);
1152 	delta66 = (near66 > freq) ? (near66 - freq) : (freq - near66);
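
	/*
	 * Worked example (matches the 733 MHz part above): freq = 731
	 * gives mul = (3*731 + 100) / 200 = 11 and
	 * near66 = (200*11 + 1) / 3 = 733, so delta66 = 2. The nearest
	 * 50 MHz multiple, computed below, is 750 with delta50 = 19, so
	 * the part is correctly reported as 733.
	 */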
1153 
1154 	/* Find the nearest integer multiple of 50 MHz to the measured speed */
1155 	mul = (freq + 25) / 50;
1156 	near50 = mul * 50;
1157 	delta50 = (near50 > freq) ? (near50 - freq) : (freq - near50);
1158 
1159 	/* Find the closer of the two */
1160 	if (delta66 < delta50) {
1161 		fixed = near66;
1162 		delta = delta66;
1163 	} else {
1164 		fixed = near50;
1165 		delta = delta50;
1166 	}
1167 
1168 	if (fixed > INT_MAX)
1169 		return;
1170 
1171 	/*
1172 	 * Some older parts have a core clock frequency that is not an
1173 	 * integral multiple of 50 or 66 MHz. Check if one of the old
1174 	 * clock frequencies is closer to the measured value than any
1175  * of the integral multiples of 50 and 66, and if so set fixed
1176 	 * and delta appropriately to represent the closest value.
1177 	 */
1178 	i = sizeof (x86_cpu_freq) / sizeof (int);
1179 	while (i > 0) {
1180 		i--;
1181 
1182 		if (x86_cpu_freq[i] <= freq) {
1183 			mul = freq - x86_cpu_freq[i];
1184 
1185 			if (mul < delta) {
1186 				fixed = x86_cpu_freq[i];
1187 				delta = mul;
1188 			}
1189 
1190 			break;
1191 		}
1192 
1193 		mul = x86_cpu_freq[i] - freq;
1194 
1195 		if (mul < delta) {
1196 			fixed = x86_cpu_freq[i];
1197 			delta = mul;
1198 		}
1199 	}
1200 
1201 	/*
1202 	 * Set a reasonable maximum for how much to correct the measured
1203 	 * result by. This check is here to prevent the adjustment made
1204 	 * by this function from being more harm than good. It is entirely
1205 	 * possible that in the future parts will be made that are not
1206 	 * integral multiples of 66 or 50 in clock frequency or that
1207 	 * someone may overclock a part to some odd frequency. If the
1208 	 * measured value is farther from the corrected value than
1209 	 * allowed, then assume the corrected value is in error and use
1210 	 * the measured value.
1211 	 */
1212 	if (6 < delta)
1213 		return;
1214 
1215 	cpu_freq = (int)fixed;
1216 }
1217 
1218 
1219 static int
1220 machhztomhz(uint64_t cpu_freq_hz)
1221 {
1222 	uint64_t cpu_mhz;
1223 
1224 	/* Round to nearest MHZ */
1225 	cpu_mhz = (cpu_freq_hz + (MEGA_HZ / 2)) / MEGA_HZ;
1226 
1227 	if (cpu_mhz > INT_MAX)
1228 		return (0);
1229 
1230 	return ((int)cpu_mhz);
1231 
1232 }
1233 
1234 
1235 static int
1236 mach_clkinit(int preferred_mode, int *set_mode)
1237 {
1238 	struct psm_ops  *pops;
1239 	int resolution;
1240 
1241 	pops = mach_set[0];
1242 
1243 	cpu_freq_hz = mach_getcpufreq();
1244 
1245 	cpu_freq = machhztomhz(cpu_freq_hz);
1246 
1247 	if (!(x86_feature & X86_TSC) || (cpu_freq == 0))
1248 		tsc_gethrtime_enable = 0;
1249 
1250 #ifndef __xpv
1251 	if (tsc_gethrtime_enable) {
1252 		tsc_hrtimeinit(cpu_freq_hz);
1253 	} else
1254 #endif
1255 	{
1256 		if (pops->psm_hrtimeinit)
1257 			(*pops->psm_hrtimeinit)();
1258 		gethrtimef = pops->psm_gethrtime;
1259 		gethrtimeunscaledf = gethrtimef;
1260 		/* scalehrtimef will remain dummy */
1261 	}
1262 
1263 	mach_fixcpufreq();
1264 
1265 	if (mach_ver[0] >= PSM_INFO_VER01_3) {
1266 		if (preferred_mode == TIMER_ONESHOT) {
1267 
1268 			resolution = (*pops->psm_clkinit)(0);
1269 			if (resolution != 0)  {
1270 				*set_mode = TIMER_ONESHOT;
1271 				return (resolution);
1272 			}
1273 		}
1274 
1275 		/*
1276 		 * Either periodic mode was requested, or we could not
1277 		 * switch to one-shot mode.
1278 		 */
1279 		resolution = (*pops->psm_clkinit)(hz);
1280 		/*
1281 		 * psm should be able to do periodic, so we do not check
1282 		 * for the return value of psm_clkinit here.
1283 		 */
1284 		*set_mode = TIMER_PERIODIC;
1285 		return (resolution);
1286 	} else {
1287 		/*
1288 		 * PSMI interface prior to PSMI_3 does not define a return
1289 		 * value for psm_clkinit, so the return value is ignored.
1290 		 */
1291 		(void) (*pops->psm_clkinit)(hz);
1292 		*set_mode = TIMER_PERIODIC;
1293 		return (nsec_per_tick);
1294 	}
1295 }
1296 
1297 
1298 /*ARGSUSED*/
1299 static int
1300 mach_softlvl_to_vect(int ipl)
1301 {
1302 	setsoftint = av_set_softint_pending;
1303 	kdisetsoftint = kdi_av_set_softint_pending;
1304 
1305 	return (PSM_SV_SOFTWARE);
1306 }
1307 
1308 #ifdef DEBUG
1309 /*
1310  * This is here to allow us to simulate cpus that refuse to start.
1311  */
1312 cpuset_t cpufailset;
1313 #endif
1314 
1315 int
1316 mach_cpu_start(struct cpu *cp, void *ctx)
1317 {
1318 	struct psm_ops *pops = mach_set[0];
1319 	processorid_t id = cp->cpu_id;
1320 
1321 #ifdef DEBUG
1322 	if (CPU_IN_SET(cpufailset, id))
1323 		return (0);
1324 #endif
1325 	return ((*pops->psm_cpu_start)(id, ctx));
1326 }
1327 
1328 int
1329 mach_cpuid_start(processorid_t id, void *ctx)
1330 {
1331 	struct psm_ops *pops = mach_set[0];
1332 
1333 #ifdef DEBUG
1334 	if (CPU_IN_SET(cpufailset, id))
1335 		return (0);
1336 #endif
1337 	return ((*pops->psm_cpu_start)(id, ctx));
1338 }
1339 
1340 /*ARGSUSED*/
1341 static int
1342 mach_translate_irq(dev_info_t *dip, int irqno)
1343 {
1344 	return (irqno);	/* default to NO translation */
1345 }
1346 
1347 static void
1348 mach_notify_error(int level, char *errmsg)
1349 {
1350 	/*
1351 	 * SL_FATAL is passed in once panicstr is set; deliver it
1352 	 * as CE_PANIC.  Also, translate SL_ codes back to CE_
1353 	 * codes for the PSMI handler.
1354 	 */
1355 	if (level & SL_FATAL)
1356 		(*notify_error)(CE_PANIC, errmsg);
1357 	else if (level & SL_WARN)
1358 		(*notify_error)(CE_WARN, errmsg);
1359 	else if (level & SL_NOTE)
1360 		(*notify_error)(CE_NOTE, errmsg);
1361 	else if (level & SL_CONSOLE)
1362 		(*notify_error)(CE_CONT, errmsg);
1363 }
1364 
1365 /*
1366  * mach_intr_ops() provides the default, basic intr_ops interface for the
1367  * new DDI interrupt framework when the PSM doesn't supply its own.
1368  *
1369  * Input:
1370  * dip     - pointer to the dev_info structure of the requested device
1371  * hdlp    - pointer to the internal interrupt handle structure for the
1372  *	     requested interrupt
1373  * intr_op - opcode for this call
1374  * result  - pointer to the integer that will hold the result to be
1375  *	     passed back if return value is PSM_SUCCESS
1376  *
1377  * Output:
1378  * return value is either PSM_SUCCESS or PSM_FAILURE
1379  */
1380 static int
1381 mach_intr_ops(dev_info_t *dip, ddi_intr_handle_impl_t *hdlp,
1382     psm_intr_op_t intr_op, int *result)
1383 {
1384 	struct intrspec *ispec;
1385 
1386 	switch (intr_op) {
1387 	case PSM_INTR_OP_CHECK_MSI:
1388 		*result = hdlp->ih_type & ~(DDI_INTR_TYPE_MSI |
1389 		    DDI_INTR_TYPE_MSIX);
1390 		break;
1391 	case PSM_INTR_OP_ALLOC_VECTORS:
1392 		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
1393 			*result = 1;
1394 		else
1395 			*result = 0;
1396 		break;
1397 	case PSM_INTR_OP_FREE_VECTORS:
1398 		break;
1399 	case PSM_INTR_OP_NAVAIL_VECTORS:
1400 		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
1401 			*result = 1;
1402 		else
1403 			*result = 0;
1404 		break;
1405 	case PSM_INTR_OP_XLATE_VECTOR:
1406 		ispec = ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp;
1407 		*result = psm_translate_irq(dip, ispec->intrspec_vec);
1408 		break;
1409 	case PSM_INTR_OP_GET_CAP:
1410 		*result = 0;
1411 		break;
1412 	case PSM_INTR_OP_GET_PENDING:
1413 	case PSM_INTR_OP_CLEAR_MASK:
1414 	case PSM_INTR_OP_SET_MASK:
1415 	case PSM_INTR_OP_GET_SHARED:
1416 	case PSM_INTR_OP_SET_PRI:
1417 	case PSM_INTR_OP_SET_CAP:
1418 	case PSM_INTR_OP_SET_CPU:
1419 	case PSM_INTR_OP_GET_INTR:
1420 	default:
1421 		return (PSM_FAILURE);
1422 	}
1423 	return (PSM_SUCCESS);
1424 }
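
/*
 * In short, the fallback above supports only fixed interrupts:
 * CHECK_MSI masks off the MSI/MSI-X types, ALLOC/NAVAIL report a
 * single vector for DDI_INTR_TYPE_FIXED, XLATE_VECTOR defers to
 * psm_translate_irq(), GET_CAP and FREE_VECTORS succeed trivially,
 * and everything else fails with PSM_FAILURE.
 */
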
1425 /*
1426  * Return 1 if CMT load balancing policies should be
1427  * implemented across instances of the specified hardware
1428  * sharing relationship.
1429  */
1430 int
1431 pg_cmt_load_bal_hw(pghw_type_t hw)
1432 {
1433 	if (hw == PGHW_IPIPE ||
1434 	    hw == PGHW_FPU ||
1435 	    hw == PGHW_CHIP)
1436 		return (1);
1437 	else
1438 		return (0);
1439 }
1440 /*
1441  * Return 1 if thread affinity policies should be implemented
1442  * for instances of the specified hardware sharing relationship.
1443  */
1444 int
1445 pg_cmt_affinity_hw(pghw_type_t hw)
1446 {
1447 	if (hw == PGHW_CACHE)
1448 		return (1);
1449 	else
1450 		return (0);
1451 }
1452