1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 #include <sys/time.h>
30 #include <sys/cpuvar.h>
31 #include <sys/dditypes.h>
32 #include <sys/ddipropdefs.h>
33 #include <sys/ddi_impldefs.h>
34 #include <sys/sunddi.h>
35 #include <sys/esunddi.h>
36 #include <sys/sunndi.h>
37 #include <sys/platform_module.h>
38 #include <sys/errno.h>
39 #include <sys/conf.h>
40 #include <sys/modctl.h>
41 #include <sys/promif.h>
42 #include <sys/promimpl.h>
43 #include <sys/prom_plat.h>
44 #include <sys/cmn_err.h>
45 #include <sys/sysmacros.h>
46 #include <sys/mem_cage.h>
47 #include <sys/kobj.h>
48 #include <sys/utsname.h>
49 #include <sys/cpu_sgnblk_defs.h>
50 #include <sys/atomic.h>
51 #include <sys/kdi_impl.h>
52 
53 #include <sys/sgsbbc.h>
54 #include <sys/sgsbbc_iosram.h>
55 #include <sys/sgsbbc_iosram_priv.h>
56 #include <sys/sgsbbc_mailbox.h>
57 #include <sys/sgsgn.h>
58 #include <sys/sgcn.h>
59 #include <sys/serengeti.h>
60 #include <sys/sgfrutypes.h>
61 #include <sys/machsystm.h>
62 #include <sys/sbd_ioctl.h>
63 #include <sys/sbd.h>
64 #include <sys/sbdp_mem.h>
65 
66 #include <sys/memnode.h>
67 #include <vm/vm_dep.h>
68 #include <vm/page.h>
69 
70 #include <sys/cheetahregs.h>
71 #include <sys/plat_ecc_unum.h>
72 #include <sys/plat_ecc_dimm.h>
73 
74 #include <sys/lgrp.h>
75 
76 static int sg_debug = 0;
77 
78 #ifdef DEBUG
79 #define	DCMNERR if (sg_debug) cmn_err
80 #else
81 #define	DCMNERR
82 #endif
83 
84 int (*p2get_mem_unum)(int, uint64_t, char *, int, int *);
85 
86 /* local functions */
87 static void cpu_sgn_update(ushort_t sgn, uchar_t state,
88     uchar_t sub_state, int cpuid);
89 
90 
91 /*
92  * Local data.
93  *
94  * iosram_write_ptr is a pointer to iosram_write().  Because of
95  * kernel dynamic linking, we can't get to the function by name,
96  * but we can look up its address, and store it in this variable
97  * instead.
98  *
99  * We include the extern for iosram_write() here not because we call
100  * it, but to force compilation errors if its prototype doesn't
101  * match the prototype of iosram_write_ptr.
102  *
103  * The same issues apply to iosram_read() and iosram_read_ptr.
104  */
105 /*CSTYLED*/
106 extern int   iosram_write     (int, uint32_t, caddr_t, uint32_t);
107 static int (*iosram_write_ptr)(int, uint32_t, caddr_t, uint32_t) = NULL;
108 /*CSTYLED*/
109 extern int   iosram_read     (int, uint32_t, caddr_t, uint32_t);
110 static int (*iosram_read_ptr)(int, uint32_t, caddr_t, uint32_t) = NULL;
111 
112 
113 /*
114  * Variable to indicate if the date should be obtained from the SC or not.
115  */
116 int todsg_use_sc = FALSE;	/* set to FALSE at the beginning */
117 
118 /*
119  * Preallocation of spare tsb's for DR
120  *
121  * We don't allocate spares for Wildcat since TSBs should come
122  * out of memory local to the node.
123  */
124 #define	IOMMU_PER_SCHIZO	2
125 int serengeti_tsb_spares = (SG_MAX_IO_BDS * SG_SCHIZO_PER_IO_BD *
126 	IOMMU_PER_SCHIZO);
127 
128 /*
129  * sg_max_ncpus is the maximum number of CPUs supported on Serengeti.
130  * sg_max_ncpus is set to be smaller than NCPU to reduce the amount of
131  * memory the logs take up until we have a dynamic log memory allocation
132  * solution.
133  */
134 int sg_max_ncpus = (24 * 2);    /* (max # of processors * # of cores/proc) */
135 
136 /*
137  * variables to control mailbox message timeouts.
138  * These can be patched via /etc/system or mdb.
139  */
140 int	sbbc_mbox_default_timeout = MBOX_DEFAULT_TIMEOUT;
141 int	sbbc_mbox_min_timeout = MBOX_MIN_TIMEOUT;
142 
143 /* cached 'chosen' node_id */
144 pnode_t chosen_nodeid = (pnode_t)0;
145 
146 static void (*sg_ecc_taskq_func)(sbbc_ecc_mbox_t *) = NULL;
147 static int (*sg_ecc_mbox_func)(sbbc_ecc_mbox_t *) = NULL;
148 
149 /*
150  * Table that maps memory slices to a specific memnode.
151  */
152 int slice_to_memnode[SG_MAX_SLICE];
153 
154 plat_dimm_sid_board_t	domain_dimm_sids[SG_MAX_CPU_BDS];
155 
156 
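/*
 * Return the number of spare TSBs to preallocate for DR, capped
 * at MAX_UPA.
 */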
157 int
158 set_platform_tsb_spares()
159 {
160 	return (MIN(serengeti_tsb_spares, MAX_UPA));
161 }
162 
163 #pragma weak mmu_init_large_pages
164 
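/*
 * Establish Serengeti-specific boot defaults: enable the hardware
 * watchdog, install cpu_sgn_update() as the CPU signature routine,
 * force the todsg TOD module on DEBUG kernels, scale the cross-call
 * tick limit for sync pause, and set up large ISM pages if available.
 */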
165 void
166 set_platform_defaults(void)
167 {
168 	extern int watchdog_enable;
169 	extern uint64_t xc_tick_limit_scale;
170 	extern void mmu_init_large_pages(size_t);
171 
172 #ifdef DEBUG
173 	char *todsg_name = "todsg";
174 	ce_verbose_memory = 2;
175 	ce_verbose_other = 2;
176 #endif /* DEBUG */
177 
178 	watchdog_enable = TRUE;
179 	watchdog_available = TRUE;
180 
181 	cpu_sgn_func = cpu_sgn_update;
182 
183 #ifdef DEBUG
184 	/* tod_module_name should be set to "todsg" from OBP property */
185 	if (tod_module_name && (strcmp(tod_module_name, todsg_name) == 0))
186 		prom_printf("Using todsg driver\n");
187 	else {
188 		prom_printf("Force using todsg driver\n");
189 		tod_module_name = todsg_name;
190 	}
191 #endif /* DEBUG */
192 
193 	/* Serengeti does not support forthdebug */
194 	forthdebug_supported = 0;
195 
196 
197 	/*
198 	 * Some DR operations require the system to be sync paused.
199 	 * Sync pause on Serengeti could potentially take up to 4
200 	 * seconds to complete depending on the load on the SC.  To
201 	 * avoid send_mond panics during such operations, we need to
202 	 * increase xc_tick_limit to a larger value on Serengeti by
203 	 * setting xc_tick_limit_scale to 5.
204 	 */
205 	xc_tick_limit_scale = 5;
206 
207 	if ((mmu_page_sizes == max_mmu_page_sizes) &&
208 	    (mmu_ism_pagesize != MMU_PAGESIZE32M)) {
209 		if (&mmu_init_large_pages)
210 			mmu_init_large_pages(mmu_ism_pagesize);
211 	}
212 }
213 
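/*
 * Load the pcihp misc module needed for PCI hotplug support.
 */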
214 void
215 load_platform_modules(void)
216 {
217 	if (modload("misc", "pcihp") < 0) {
218 		cmn_err(CE_NOTE, "pcihp driver failed to load");
219 	}
220 }
221 
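/*
 * Power on a CPU by calling sbdp_cpu_poweron(), which is resolved at
 * runtime with modgetsymvalue() since the DR support code may not be
 * loaded; return ENOTSUP if the symbol is not found.
 */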
222 /*ARGSUSED*/
223 int
224 plat_cpu_poweron(struct cpu *cp)
225 {
226 	int (*serengeti_cpu_poweron)(struct cpu *) = NULL;
227 
228 	serengeti_cpu_poweron =
229 	    (int (*)(struct cpu *))modgetsymvalue("sbdp_cpu_poweron", 0);
230 
231 	if (serengeti_cpu_poweron == NULL)
232 		return (ENOTSUP);
233 	else
234 		return ((serengeti_cpu_poweron)(cp));
235 }
236 
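/*
 * Power off a CPU via sbdp_cpu_poweroff(), resolved at runtime the
 * same way as plat_cpu_poweron() above.
 */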
237 /*ARGSUSED*/
238 int
239 plat_cpu_poweroff(struct cpu *cp)
240 {
241 	int (*serengeti_cpu_poweroff)(struct cpu *) = NULL;
242 
243 	serengeti_cpu_poweroff =
244 	    (int (*)(struct cpu *))modgetsymvalue("sbdp_cpu_poweroff", 0);
245 
246 	if (serengeti_cpu_poweroff == NULL)
247 		return (ENOTSUP);
248 	else
249 		return ((serengeti_cpu_poweroff)(cp));
250 }
251 
252 #ifdef DEBUG
253 pgcnt_t serengeti_cage_size_limit;
254 #endif
255 
256 /* Preferred minimum cage size (expressed in pages)... for DR */
257 pgcnt_t serengeti_minimum_cage_size = 0;
258 
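/*
 * Configure and initialize the DR kernel cage, sized to the larger of
 * serengeti_minimum_cage_size and total_pages / 256.
 */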
259 void
260 set_platform_cage_params(void)
261 {
262 	extern pgcnt_t total_pages;
263 	extern struct memlist *phys_avail;
264 	int ret;
265 
266 	if (kernel_cage_enable) {
267 		pgcnt_t preferred_cage_size;
268 
269 		preferred_cage_size =
270 		    MAX(serengeti_minimum_cage_size, total_pages / 256);
271 #ifdef DEBUG
272 		if (serengeti_cage_size_limit)
273 			preferred_cage_size = serengeti_cage_size_limit;
274 #endif
275 		kcage_range_lock();
276 		/*
277 		 * POST copies OBP into the lowest slice.  This requires the
278 		 * cage to grow upwards.
279 		 */
280 		ret = kcage_range_init(phys_avail, 0);
281 		if (ret == 0)
282 			kcage_init(preferred_cage_size);
283 		kcage_range_unlock();
284 	}
285 
286 	/* Only note when the cage is off since it should always be on. */
287 	if (!kcage_on)
288 		cmn_err(CE_NOTE, "!DR Kernel Cage is DISABLED");
289 }
290 
291 #define	ALIGN(x, a)	((a) == 0 ? (uint64_t)(x) : \
292 	(((uint64_t)(x) + (uint64_t)(a) - 1l) & ~((uint64_t)(a) - 1l)))
293 
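/*
 * Assign a memnode to the given board (if it does not already have
 * one) and record that memnode for every memory slice spanned by
 * [base, base + sz).
 */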
294 void
295 update_mem_bounds(int brd, uint64_t base, uint64_t sz)
296 {
297 	uint64_t	end;
298 	int		mnode;
299 
300 	end = base + sz - 1;
301 
302 	/*
303 	 * First see if this board already has a memnode associated
304 	 * with it.  If not, see if this slice has a memnode.  This
305 	 * covers the cases where a single slice covers multiple
306 	 * boards (cross-board interleaving) and where a single
307 	 * board has multiple slices (1+GB DIMMs).
308 	 */
309 	if ((mnode = plat_lgrphand_to_mem_node(brd)) == -1) {
310 		if ((mnode = slice_to_memnode[PA_2_SLICE(base)]) == -1)
311 			mnode = mem_node_alloc();
312 		plat_assign_lgrphand_to_mem_node(brd, mnode);
313 	}
314 
315 	/*
316 	 * Align base at 16GB boundary
317 	 */
318 	base = ALIGN(base, (1ul << PA_SLICE_SHIFT));
319 
320 	while (base < end) {
321 		slice_to_memnode[PA_2_SLICE(base)] = mnode;
322 		base += (1ul << PA_SLICE_SHIFT);
323 	}
324 }
325 
326 /*
327  * Dynamically detect memory slices in the system by decoding
328  * the cpu memory decoder registers at boot time.
329  */
330 void
331 plat_fill_mc(pnode_t nodeid)
332 {
333 	uint64_t	mc_addr, mask;
334 	uint64_t	mc_decode[SG_MAX_BANKS_PER_MC];
335 	uint64_t	base, size;
336 	uint32_t	regs[4];
337 	int		len;
338 	int		local_mc;
339 	int		portid;
340 	int		boardid;
341 	int		i;
342 
343 	if ((prom_getprop(nodeid, "portid", (caddr_t)&portid) < 0) ||
344 	    (portid == -1))
345 		return;
346 
347 	/*
348 	 * Decode the board number from the MC portid
349 	 */
350 	boardid = SG_PORTID_TO_BOARD_NUM(portid);
351 
352 	/*
353 	 * The "reg" property returns 4 32-bit values. The first two are
354 	 * combined to form a 64-bit address.  The second two are for a
355 	 * 64-bit size, but we don't actually need to look at that value.
356 	 */
357 	len = prom_getproplen(nodeid, "reg");
358 	if (len != (sizeof (uint32_t) * 4)) {
359 		prom_printf("Warning: malformed 'reg' property\n");
360 		return;
361 	}
362 	if (prom_getprop(nodeid, "reg", (caddr_t)regs) < 0)
363 		return;
364 	mc_addr = ((uint64_t)regs[0]) << 32;
365 	mc_addr |= (uint64_t)regs[1];
366 
367 	/*
368 	 * Figure out whether the memory controller we are examining
369 	 * belongs to this CPU or a different one.
370 	 */
371 	if (portid == cpunodes[CPU->cpu_id].portid)
372 		local_mc = 1;
373 	else
374 		local_mc = 0;
375 
376 	for (i = 0; i < SG_MAX_BANKS_PER_MC; i++) {
377 		mask = SG_REG_2_OFFSET(i);
378 
379 		/*
380 		 * If the memory controller is local to this CPU, we use
381 		 * the special ASI to read the decode registers.
382 		 * Otherwise, we load the values from a magic address in
383 		 * I/O space.
384 		 */
385 		if (local_mc)
386 			mc_decode[i] = lddmcdecode(mask & MC_OFFSET_MASK);
387 		else
388 			mc_decode[i] = lddphysio((mc_addr | mask));
389 
390 		if (mc_decode[i] >> MC_VALID_SHIFT) {
391 			/*
392 			 * The memory decode register is a bitmask field,
393 			 * so we can decode that into both a base and
394 			 * a span.
395 			 */
396 			base = MC_BASE(mc_decode[i]) << PHYS2UM_SHIFT;
397 			size = MC_UK2SPAN(mc_decode[i]);
398 			update_mem_bounds(boardid, base, size);
399 		}
400 	}
401 }
402 
403 /*
404  * This routine is run midway through the boot process.  By the time we get
405  * here, we know about all the active CPU boards in the system, and we have
406  * extracted information about each board's memory from the memory
407  * controllers.  We have also figured out which ranges of memory will be
408  * assigned to which memnodes, so we walk the slice table to build the table
409  * of memnodes.
410  */
411 /* ARGSUSED */
412 void
413 plat_build_mem_nodes(u_longlong_t *list, size_t  nelems)
414 {
415 	int	slice;
416 	pfn_t	basepfn;
417 	pgcnt_t	npgs;
418 
419 	mem_node_pfn_shift = PFN_SLICE_SHIFT;
420 	mem_node_physalign = (1ull << PA_SLICE_SHIFT);
421 
422 	for (slice = 0; slice < SG_MAX_SLICE; slice++) {
423 		if (slice_to_memnode[slice] == -1)
424 			continue;
425 		basepfn = (uint64_t)slice << PFN_SLICE_SHIFT;
426 		npgs = 1ull << PFN_SLICE_SHIFT;
427 		mem_node_add_slice(basepfn, basepfn + npgs - 1);
428 	}
429 }
430 
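/*
 * Return the memnode containing the given pfn, using the
 * slice-to-memnode table built at boot and updated by DR.
 */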
431 int
432 plat_pfn_to_mem_node(pfn_t pfn)
433 {
434 	int node;
435 
436 	node = slice_to_memnode[PFN_2_SLICE(pfn)];
437 
438 	return (node);
439 }
440 
441 /*
442  * Serengeti support for lgroups.
443  *
444  * On Serengeti, an lgroup platform handle == board number.
445  *
446  * Mappings between lgroup handles and memnodes are managed
447  * in addition to mappings between memory slices and memnodes
448  * to support cross-board interleaving as well as multiple
449  * slices per board (e.g. >1GB DIMMs). The initial mapping
450  * of memnodes to lgroup handles is determined at boot time.
451  * A DR addition of memory adds a new mapping. A DR copy-rename
452  * swaps mappings.
453  */
454 
455 /*
456  * Macro for extracting the board number from the CPU id
457  */
458 #define	CPUID_TO_BOARD(id)	(((id) >> 2) & 0x7)
459 
460 /*
461  * Return the platform handle for the lgroup containing the given CPU
462  *
463  * For Serengeti, lgroup platform handle == board number
464  */
465 lgrp_handle_t
466 plat_lgrp_cpu_to_hand(processorid_t id)
467 {
468 	return (CPUID_TO_BOARD(id));
469 }
470 
471 /*
472  * Platform specific lgroup initialization
473  */
474 void
475 plat_lgrp_init(void)
476 {
477 	int i;
478 	extern uint32_t lgrp_expand_proc_thresh;
479 	extern uint32_t lgrp_expand_proc_diff;
480 
481 	/*
482 	 * Initialize lookup tables to invalid values so we catch
483 	 * any illegal use of them.
484 	 */
485 	for (i = 0; i < SG_MAX_SLICE; i++) {
486 		slice_to_memnode[i] = -1;
487 	}
488 
489 	/*
490 	 * Set tuneables for Serengeti architecture
491 	 *
492 	 * lgrp_expand_proc_thresh is the minimum load on the lgroups
493 	 * this process is currently running on before considering
494 	 * expanding threads to another lgroup.
495 	 *
496 	 * lgrp_expand_proc_diff determines how much less the remote lgroup
497 	 * must be loaded before expanding to it.
498 	 *
499 	 * Bandwidth is maximized on Serengeti by spreading load across
500 	 * the machine. The impact to inter-thread communication isn't
501 	 * too costly since remote latencies are relatively low.  These
502 	 * values equate to one CPU's load and so attempt to spread the
503 	 * load out across as many lgroups as possible one CPU at a time.
504 	 */
505 	lgrp_expand_proc_thresh = LGRP_LOADAVG_THREAD_MAX;
506 	lgrp_expand_proc_diff = LGRP_LOADAVG_THREAD_MAX;
507 }
508 
509 /*
510  * Platform notification of lgroup (re)configuration changes
511  */
512 /*ARGSUSED*/
513 void
514 plat_lgrp_config(lgrp_config_flag_t evt, uintptr_t arg)
515 {
516 	update_membounds_t	*umb;
517 	lgrp_config_mem_rename_t lmr;
518 	lgrp_handle_t		shand, thand;
519 	int			snode, tnode;
520 
521 	switch (evt) {
522 
523 	case LGRP_CONFIG_MEM_ADD:
524 		umb = (update_membounds_t *)arg;
525 		update_mem_bounds(umb->u_board, umb->u_base, umb->u_len);
526 
527 		break;
528 
529 	case LGRP_CONFIG_MEM_DEL:
530 		/* We don't have to do anything */
531 		break;
532 
533 	case LGRP_CONFIG_MEM_RENAME:
534 		/*
535 		 * During a DR copy-rename operation, all of the memory
536 		 * on one board is moved to another board -- but the
537 		 * addresses/pfns and memnodes don't change. This means
538 		 * the memory has changed locations without changing identity.
539 		 *
540 		 * Source is where we are copying from and target is where we
541 		 * are copying to.  After source memnode is copied to target
542 		 * memnode, the physical addresses of the target memnode are
543 		 * renamed to match what the source memnode had.  Then target
544 		 * memnode can be removed and source memnode can take its
545 		 * place.
546 		 *
547 		 * To do this, swap the lgroup handle to memnode mappings for
548 		 * the boards, so target lgroup will have source memnode and
549 		 * source lgroup will have empty target memnode which is where
550 		 * its memory will go (if any is added to it later).
551 		 *
552 		 * Then source memnode needs to be removed from its lgroup
553 		 * and added to the target lgroup where the memory was living
554 		 * but under a different name/memnode.  The memory was in the
555 		 * target memnode and now lives in the source memnode with
556 		 * different physical addresses even though it is the same
557 		 * memory.
558 		 */
559 		shand = arg & 0xffff;
560 		thand = (arg & 0xffff0000) >> 16;
561 		snode = plat_lgrphand_to_mem_node(shand);
562 		tnode = plat_lgrphand_to_mem_node(thand);
563 
564 		plat_assign_lgrphand_to_mem_node(thand, snode);
565 		plat_assign_lgrphand_to_mem_node(shand, tnode);
566 
567 		/*
568 		 * Remove source memnode of copy rename from its lgroup
569 		 * and add it to its new target lgroup
570 		 */
571 		lmr.lmem_rename_from = shand;
572 		lmr.lmem_rename_to = thand;
573 
574 		lgrp_config(LGRP_CONFIG_MEM_RENAME, (uintptr_t)snode,
575 		    (uintptr_t)&lmr);
576 
577 		break;
578 
579 	default:
580 		break;
581 	}
582 }
583 
584 /*
585  * Return latency between "from" and "to" lgroups
586  *
587  * This latency number can only be used for relative comparison
588  * between lgroups on the running system, cannot be used across platforms,
589  * and may not reflect the actual latency.  It is platform and implementation
590  * specific, so platform gets to decide its value.  It would be nice if the
591  * number was at least proportional to make comparisons more meaningful though.
592  * NOTE: The numbers below are supposed to be load latencies for uncached
593  * memory divided by 10.
594  */
595 int
596 plat_lgrp_latency(lgrp_handle_t from, lgrp_handle_t to)
597 {
598 	/*
599 	 * Return min remote latency when there are more than two lgroups
600 	 * (root and child) and getting latency between two different lgroups
601 	 * or root is involved
602 	 */
603 	if (lgrp_optimizations() && (from != to ||
604 	    from == LGRP_DEFAULT_HANDLE || to == LGRP_DEFAULT_HANDLE))
605 		return (28);
606 	else
607 		return (23);
608 }
609 
610 /* ARGSUSED */
611 void
612 plat_freelist_process(int mnode)
613 {
614 }
615 
616 /*
617  * Find dip for chosen IOSRAM
618  */
619 dev_info_t *
620 find_chosen_dip(void)
621 {
622 	dev_info_t	*dip;
623 	char		master_sbbc[MAXNAMELEN];
624 	pnode_t		nodeid;
625 	uint_t		tunnel;
626 
627 	/*
628 	 * find the /chosen SBBC node, prom interface will handle errors
629 	 */
630 	nodeid = prom_chosennode();
631 
632 	/*
633 	 * get the 'iosram' property from the /chosen node
634 	 */
635 	if (prom_getprop(nodeid, IOSRAM_CHOSEN_PROP, (caddr_t)&tunnel) <= 0) {
636 		SBBC_ERR(CE_PANIC, "No iosram property found! \n");
637 	}
638 
639 	if (prom_phandle_to_path((phandle_t)tunnel, master_sbbc,
640 	    sizeof (master_sbbc)) < 0) {
641 		SBBC_ERR1(CE_PANIC, "prom_phandle_to_path(%d) failed\n",
642 		    tunnel);
643 	}
644 
645 	chosen_nodeid = nodeid;
646 
647 	/*
648 	 * load and attach the sgsbbc driver.
649 	 * This will also attach all the sgsbbc driver instances
650 	 */
651 	if (i_ddi_attach_hw_nodes("sgsbbc") != DDI_SUCCESS) {
652 		cmn_err(CE_WARN, "sgsbbc failed to load\n");
653 	}
654 
655 	/* translate a path name to a dev_info_t */
656 	dip = e_ddi_hold_devi_by_path(master_sbbc, 0);
657 	if ((dip == NULL) || (ddi_get_nodeid(dip) != tunnel)) {
658 		cmn_err(CE_PANIC, "e_ddi_hold_devi_by_path(%x) failed for SBBC\n",
659 		    tunnel);
660 	}
661 
662 	/* make sure devi_ref is ZERO */
663 	ndi_rele_devi(dip);
664 
665 	DCMNERR(CE_CONT, "Chosen IOSRAM is at %s \n", master_sbbc);
666 
667 	return (dip);
668 }
669 
670 void
671 load_platform_drivers(void)
672 {
673 	int ret = 0;
674 
675 	/*
676 	 * Load and attach the mc-us3 memory driver.
677 	 */
678 	if (i_ddi_attach_hw_nodes("mc-us3") != DDI_SUCCESS)
679 		cmn_err(CE_WARN, "mc-us3 failed to load");
680 	else
681 		(void) ddi_hold_driver(ddi_name_to_major("mc-us3"));
682 
683 	/*
684 	 * Initialize the chosen IOSRAM before its clients
685 	 * are loaded.
686 	 */
687 	(void) find_chosen_dip();
688 
689 	/*
690 	 * Ideally, we'd do this in set_platform_defaults(), but
691 	 * at that point it's too early to look up symbols.
692 	 */
693 	iosram_write_ptr = (int (*)(int, uint32_t, caddr_t, uint32_t))
694 	    modgetsymvalue("iosram_write", 0);
695 
696 	if (iosram_write_ptr == NULL) {
697 		DCMNERR(CE_WARN, "load_platform_drivers: iosram_write()"
698 		    " not found; signatures will not be updated\n");
699 	} else {
700 		/*
701 		 * The iosram read ptr is only needed if we can actually
702 		 * write CPU signatures, so only bother setting it if we
703 		 * set a valid write pointer, above.
704 		 */
705 		iosram_read_ptr = (int (*)(int, uint32_t, caddr_t, uint32_t))
706 		    modgetsymvalue("iosram_read", 0);
707 
708 		if (iosram_read_ptr == NULL)
709 			DCMNERR(CE_WARN, "load_platform_drivers: iosram_read()"
710 			    " not found\n");
711 	}
712 
713 	/*
714 	 * Set todsg_use_sc to TRUE so that we will get the date
715 	 * from the SC.
716 	 */
717 	todsg_use_sc = TRUE;
718 
719 	/*
720 	 * Now is a good time to activate hardware watchdog (if one exists).
721 	 */
722 	mutex_enter(&tod_lock);
723 	if (watchdog_enable)
724 		ret = tod_ops.tod_set_watchdog_timer(watchdog_timeout_seconds);
725 	mutex_exit(&tod_lock);
726 	if (ret != 0)
727 		printf("Hardware watchdog enabled\n");
728 
729 	/*
730 	 * Load and attach the schizo pci bus nexus driver.
731 	 */
732 	if (i_ddi_attach_hw_nodes("pcisch") != DDI_SUCCESS)
733 		cmn_err(CE_WARN, "pcisch failed to load");
734 
735 	plat_ecc_init();
736 }
737 
738 /*
739  * No platform drivers on this platform
740  */
741 char *platform_module_list[] = {
742 	(char *)0
743 };
744 
745 /*ARGSUSED*/
746 void
747 plat_tod_fault(enum tod_fault_type tod_bad)
748 {
749 }
750 int
751 plat_max_boards()
752 {
753 	return (SG_MAX_BDS);
754 }
755 int
756 plat_max_io_units_per_board()
757 {
758 	return (SG_MAX_IO_PER_BD);
759 }
760 int
761 plat_max_cmp_units_per_board()
762 {
763 	return (SG_MAX_CMPS_PER_BD);
764 }
765 int
766 plat_max_cpu_units_per_board()
767 {
768 	return (SG_MAX_CPUS_PER_BD);
769 }
770 
771 int
772 plat_max_mc_units_per_board()
773 {
774 	return (SG_MAX_CMPS_PER_BD); /* each CPU die has a memory controller */
775 }
776 
777 int
778 plat_max_mem_units_per_board()
779 {
780 	return (SG_MAX_MEM_PER_BD);
781 }
782 
783 int
784 plat_max_cpumem_boards(void)
785 {
786 	return (SG_MAX_CPU_BDS);
787 }
788 
789 int
790 set_platform_max_ncpus(void)
791 {
792 	return (sg_max_ncpus);
793 }
794 
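/*
 * Supply the databearing mondo vector (DMV) parameters: MAX_UPA
 * hardware interrupt entries and no software entries.
 */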
795 void
796 plat_dmv_params(uint_t *hwint, uint_t *swint)
797 {
798 	*hwint = MAX_UPA;
799 	*swint = 0;
800 }
801 
802 /*
803  * Our nodename has been set, pass it along to the SC.
804  */
805 void
806 plat_nodename_set(void)
807 {
808 	sbbc_msg_t	req;	/* request */
809 	sbbc_msg_t	resp;	/* response */
810 	int		rv;	/* return value from call to mbox */
811 	struct nodename_info {
812 		int32_t	namelen;
813 		char	nodename[_SYS_NMLN];
814 	} nni;
815 	int (*sg_mbox)(sbbc_msg_t *, sbbc_msg_t *, time_t) = NULL;
816 
817 	/*
818 	 * find the symbol for the mailbox routine
819 	 */
820 	sg_mbox = (int (*)(sbbc_msg_t *, sbbc_msg_t *, time_t))
821 		modgetsymvalue("sbbc_mbox_request_response", 0);
822 
823 	if (sg_mbox == NULL) {
824 		cmn_err(CE_NOTE, "!plat_nodename_set: sg_mbox not found\n");
825 		return;
826 	}
827 
828 	/*
829 	 * construct the message telling the SC our nodename
830 	 */
831 	(void) strcpy(nni.nodename, utsname.nodename);
832 	nni.namelen = (int32_t)strlen(nni.nodename);
833 
834 	req.msg_type.type = INFO_MBOX;
835 	req.msg_type.sub_type = INFO_MBOX_NODENAME;
836 	req.msg_status = 0;
837 	req.msg_len = (int)(nni.namelen + sizeof (nni.namelen));
838 	req.msg_bytes = 0;
839 	req.msg_buf = (caddr_t)&nni;
840 	req.msg_data[0] = 0;
841 	req.msg_data[1] = 0;
842 
843 	/*
844 	 * initialize the response back from the SC
845 	 */
846 	resp.msg_type.type = INFO_MBOX;
847 	resp.msg_type.sub_type = INFO_MBOX_NODENAME;
848 	resp.msg_status = 0;
849 	resp.msg_len = 0;
850 	resp.msg_bytes = 0;
851 	resp.msg_buf = (caddr_t)0;
852 	resp.msg_data[0] = 0;
853 	resp.msg_data[1] = 0;
854 
855 	/*
856 	 * ship it and check for success
857 	 */
858 	rv = (sg_mbox)(&req, &resp, sbbc_mbox_default_timeout);
859 
860 	if (rv != 0) {
861 		cmn_err(CE_NOTE, "!plat_nodename_set: sg_mbox retval %d\n", rv);
862 	} else if (resp.msg_status != 0) {
863 		cmn_err(CE_NOTE, "!plat_nodename_set: msg_status %d\n",
864 			resp.msg_status);
865 	} else {
866 		DCMNERR(CE_NOTE, "!plat_nodename_set was successful\n");
867 
868 		/*
869 		 * It is necessary to exchange the capability bitmap
870 		 * with SC before sending any ecc error information and
871 		 * indictment. We are calling the plat_ecc_capability_send()
872 		 * here just after sending the nodename successfully.
873 		 */
874 		rv = plat_ecc_capability_send();
875 		if (rv == 0) {
876 			DCMNERR(CE_NOTE, "!plat_ecc_capability_send was"
877 			    " successful\n");
878 		}
879 	}
880 }
881 
882 /*
883  * flag to allow users switch between using OBP's
884  * prom_get_unum() and mc-us3 driver's p2get_mem_unum()
885  * (for main memory errors only).
886  */
887 int sg_use_prom_get_unum = 0;
888 
889 /*
890  * Debugging flag: set to 1 to call into obp for get_unum, or set it to 0
891  * to call into the unum cache system.  This is the E$ equivalent of
892  * sg_use_prom_get_unum.
893  */
894 int sg_use_prom_ecache_unum = 0;
895 
896 /* used for logging ECC errors to the SC */
897 #define	SG_MEMORY_ECC	1
898 #define	SG_ECACHE_ECC	2
899 #define	SG_UNKNOWN_ECC	(-1)
900 
901 /*
902  * plat_get_mem_unum() generates a string identifying either the
903  * memory or E$ DIMM(s) during error logging. Depending on whether
904  * the error is E$ or memory related, the appropriate support
905  * routine is called to assist in the string generation.
906  *
907  * - For main memory errors we can use the mc-us3 driver's p2get_mem_unum()
908  *   (or prom_get_unum() for debugging purposes).
909  *
910  * - For E$ errors we call sg_get_ecacheunum() to generate the unum (or
911  *   prom_serengeti_get_ecacheunum() for debugging purposes).
912  */
913 
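/*
 * Wrapper around prom_get_unum() that maps a PROM failure to EIO and
 * an empty unum string to EINVAL.
 */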
914 static int
915 sg_prom_get_unum(int synd_code, uint64_t paddr, char *buf, int buflen,
916     int *lenp)
917 {
918 	if ((prom_get_unum(synd_code, (unsigned long long)paddr,
919 	    buf, buflen, lenp)) != 0)
920 		return (EIO);
921 	else if (*lenp <= 1)
922 		return (EINVAL);
923 	else
924 		return (0);
925 }
926 
927 /*ARGSUSED*/
928 int
929 plat_get_mem_unum(int synd_code, uint64_t flt_addr, int flt_bus_id,
930     int flt_in_memory, ushort_t flt_status, char *buf, int buflen, int *lenp)
931 {
932 	/*
933 	 * unum_func will either point to the memory driver's p2get_mem_unum()
934 	 * or to prom_get_unum() for memory errors.
935 	 */
936 	int (*unum_func)(int synd_code, uint64_t paddr, char *buf,
937 	    int buflen, int *lenp) = p2get_mem_unum;
938 
939 	/*
940 	 * check if it's a Memory or an Ecache error.
941 	 */
942 	if (flt_in_memory) {
943 		/*
944 		 * It's a main memory error.
945 		 *
946 		 * For debugging we allow the user to switch between
947 		 * using OBP's get_unum and the memory driver's get_unum
948 		 * so we create a pointer to the functions and switch
949 		 * depending on the sg_use_prom_get_unum flag.
950 		 */
951 		if (sg_use_prom_get_unum) {
952 			DCMNERR(CE_NOTE, "Using prom_get_unum from OBP");
953 			return (sg_prom_get_unum(synd_code,
954 			    P2ALIGN(flt_addr, 8), buf, buflen, lenp));
955 		} else if (unum_func != NULL) {
956 			return (unum_func(synd_code, P2ALIGN(flt_addr, 8),
957 			    buf, buflen, lenp));
958 		} else {
959 			return (ENOTSUP);
960 		}
961 	} else if (flt_status & ECC_ECACHE) {
962 		/*
963 		 * It's an E$ error.
964 		 */
965 		if (sg_use_prom_ecache_unum) {
966 			/*
967 			 * We call to OBP to handle this.
968 			 */
969 			DCMNERR(CE_NOTE,
970 			    "Using prom_serengeti_get_ecacheunum from OBP");
971 			if (prom_serengeti_get_ecacheunum(flt_bus_id,
972 			    P2ALIGN(flt_addr, 8), buf, buflen, lenp) != 0) {
973 				return (EIO);
974 			}
975 		} else {
976 			return (sg_get_ecacheunum(flt_bus_id, flt_addr,
977 			    buf, buflen, lenp));
978 		}
979 	} else {
980 		return (ENOTSUP);
981 	}
982 
983 	return (0);
984 }
985 
986 /*
987  * This platform hook gets called from mc_add_mem_unum_label() in the mc-us3
988  * driver giving each platform the opportunity to add platform
989  * specific label information to the unum for ECC error logging purposes.
990  */
991 void
992 plat_add_mem_unum_label(char *unum, int mcid, int bank, int dimm)
993 {
994 	char	new_unum[UNUM_NAMLEN] = "";
995 	int	node = SG_PORTID_TO_NODEID(mcid);
996 	int	board = SG_CPU_BD_PORTID_TO_BD_NUM(mcid);
997 	int	position = SG_PORTID_TO_CPU_POSN(mcid);
998 
999 	/*
1000 	 * The mc-us3 driver deals with logical banks but for unum
1001 	 * purposes we need to use physical banks so that the correct
1002 	 * dimm can be physically located. Logical banks 0 and 2
1003 	 * make up physical bank 0. Logical banks 1 and 3 make up
1004 	 * physical bank 1. Here we do the necessary conversion.
1005 	 */
1006 	bank = (bank % 2);
1007 
1008 	if (dimm == -1) {
1009 		SG_SET_FRU_NAME_NODE(new_unum, node);
1010 		SG_SET_FRU_NAME_CPU_BOARD(new_unum, board);
1011 		SG_SET_FRU_NAME_MODULE(new_unum, position);
1012 		SG_SET_FRU_NAME_BANK(new_unum, bank);
1013 
1014 	} else {
1015 		SG_SET_FRU_NAME_NODE(new_unum, node);
1016 		SG_SET_FRU_NAME_CPU_BOARD(new_unum, board);
1017 		SG_SET_FRU_NAME_MODULE(new_unum, position);
1018 		SG_SET_FRU_NAME_BANK(new_unum, bank);
1019 		SG_SET_FRU_NAME_DIMM(new_unum, dimm);
1020 
1021 		(void) strcat(new_unum, " ");
1022 		(void) strcat(new_unum, unum);
1023 	}
1024 
1025 	(void) strcpy(unum, new_unum);
1026 }
1027 
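/*
 * Generate a unum identifying the node and CPU board that contain
 * the given cpuid.
 */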
1028 int
1029 plat_get_cpu_unum(int cpuid, char *buf, int buflen, int *lenp)
1030 {
1031 	int	node = SG_PORTID_TO_NODEID(cpuid);
1032 	int	board = SG_CPU_BD_PORTID_TO_BD_NUM(cpuid);
1033 
1034 	if (snprintf(buf, buflen, "/N%d/%s%d", node,
1035 	    SG_HPU_TYPE_CPU_BOARD_ID, board) >= buflen) {
1036 		return (ENOSPC);
1037 	} else {
1038 		*lenp = strlen(buf);
1039 		return (0);
1040 	}
1041 }
1042 
1043 /*
1044  * We log all ECC events to the SC so we send a mailbox
1045  * message to the SC passing it the relevant data.
1046  * ECC mailbox messages are sent via a taskq mechanism to
1047  * prevent impaired system performance during ECC floods.
1048  * Indictments have already passed through a taskq, so they
1049  * are not queued here.
1050  */
1051 int
1052 plat_send_ecc_mailbox_msg(plat_ecc_message_type_t msg_type, void *datap)
1053 {
1054 	sbbc_ecc_mbox_t	*msgp;
1055 	size_t		msg_size;
1056 	uint16_t	msg_subtype;
1057 	int		sleep_flag, log_error;
1058 
1059 	if (sg_ecc_taskq_func == NULL) {
1060 		sg_ecc_taskq_func = (void (*)(sbbc_ecc_mbox_t *))
1061 		    modgetsymvalue("sbbc_mbox_queue_ecc_event", 0);
1062 		if (sg_ecc_taskq_func == NULL) {
1063 			cmn_err(CE_NOTE, "!plat_send_ecc_mailbox_msg: "
1064 			    "sbbc_mbox_queue_ecc_event not found");
1065 			return (ENODEV);
1066 		}
1067 	}
1068 	if (sg_ecc_mbox_func == NULL) {
1069 		sg_ecc_mbox_func = (int (*)(sbbc_ecc_mbox_t *))
1070 		    modgetsymvalue("sbbc_mbox_ecc_output", 0);
1071 		if (sg_ecc_mbox_func == NULL) {
1072 			cmn_err(CE_NOTE, "!plat_send_ecc_mailbox_msg: "
1073 			    "sbbc_mbox_ecc_output not found");
1074 			return (ENODEV);
1075 		}
1076 	}
1077 
1078 	/*
1079 	 * Initialize the request and response structures
1080 	 */
1081 	switch (msg_type) {
1082 	case PLAT_ECC_ERROR_MESSAGE:
1083 		msg_subtype = INFO_MBOX_ERROR_ECC;
1084 		msg_size = sizeof (plat_ecc_error_data_t);
1085 		sleep_flag = KM_NOSLEEP;
1086 		log_error = 1;
1087 		break;
1088 	case PLAT_ECC_ERROR2_MESSAGE:
1089 		msg_subtype = INFO_MBOX_ECC;
1090 		msg_size = sizeof (plat_ecc_error2_data_t);
1091 		sleep_flag = KM_NOSLEEP;
1092 		log_error = 1;
1093 		break;
1094 	case PLAT_ECC_INDICTMENT_MESSAGE:
1095 		msg_subtype = INFO_MBOX_ERROR_INDICT;
1096 		msg_size = sizeof (plat_ecc_indictment_data_t);
1097 		sleep_flag = KM_SLEEP;
1098 		log_error = 0;
1099 		break;
1100 	case PLAT_ECC_INDICTMENT2_MESSAGE:
1101 		msg_subtype = INFO_MBOX_ECC;
1102 		msg_size = sizeof (plat_ecc_indictment2_data_t);
1103 		sleep_flag = KM_SLEEP;
1104 		log_error = 0;
1105 		break;
1106 	case PLAT_ECC_CAPABILITY_MESSAGE:
1107 		msg_subtype = INFO_MBOX_ECC_CAP;
1108 		msg_size = sizeof (plat_capability_data_t) +
1109 		    strlen(utsname.release) + strlen(utsname.version) + 2;
1110 		sleep_flag = KM_SLEEP;
1111 		log_error = 0;
1112 		break;
1113 	case PLAT_ECC_DIMM_SID_MESSAGE:
1114 		msg_subtype = INFO_MBOX_ECC;
1115 		msg_size = sizeof (plat_dimm_sid_request_data_t);
1116 		sleep_flag = KM_SLEEP;
1117 		log_error = 0;
1118 		break;
1119 	default:
1120 		return (EINVAL);
1121 	}
1122 
1123 	msgp = (sbbc_ecc_mbox_t *)kmem_zalloc(sizeof (sbbc_ecc_mbox_t),
1124 	    sleep_flag);
1125 	if (msgp == NULL) {
1126 		cmn_err(CE_NOTE, "!plat_send_ecc_mailbox_msg: "
1127 				"unable to allocate sbbc_ecc_mbox");
1128 		return (ENOMEM);
1129 	}
1130 
1131 	msgp->ecc_log_error = log_error;
1132 
1133 	msgp->ecc_req.msg_type.type = INFO_MBOX;
1134 	msgp->ecc_req.msg_type.sub_type = msg_subtype;
1135 	msgp->ecc_req.msg_status = 0;
1136 	msgp->ecc_req.msg_len = (int)msg_size;
1137 	msgp->ecc_req.msg_bytes = 0;
1138 	msgp->ecc_req.msg_buf = (caddr_t)kmem_zalloc(msg_size, sleep_flag);
1139 	msgp->ecc_req.msg_data[0] = 0;
1140 	msgp->ecc_req.msg_data[1] = 0;
1141 
1142 	if (msgp->ecc_req.msg_buf == NULL) {
1143 		cmn_err(CE_NOTE, "!plat_send_ecc_mailbox_msg: "
1144 				"unable to allocate request msg_buf");
1145 		kmem_free((void *)msgp, sizeof (sbbc_ecc_mbox_t));
1146 		return (ENOMEM);
1147 	}
1148 	bcopy(datap, (void *)msgp->ecc_req.msg_buf, msg_size);
1149 
1150 	/*
1151 	 * initialize the response back from the SC
1152 	 */
1153 	msgp->ecc_resp.msg_type.type = INFO_MBOX;
1154 	msgp->ecc_resp.msg_type.sub_type = msg_subtype;
1155 	msgp->ecc_resp.msg_status = 0;
1156 	msgp->ecc_resp.msg_len = 0;
1157 	msgp->ecc_resp.msg_bytes = 0;
1158 	msgp->ecc_resp.msg_buf = NULL;
1159 	msgp->ecc_resp.msg_data[0] = 0;
1160 	msgp->ecc_resp.msg_data[1] = 0;
1161 
1162 	switch (msg_type) {
1163 	case PLAT_ECC_ERROR_MESSAGE:
1164 	case PLAT_ECC_ERROR2_MESSAGE:
1165 		/*
1166 		 * For Error Messages, we go through a taskq.
1167 		 * Queue up the message for processing
1168 		 */
1169 		(*sg_ecc_taskq_func)(msgp);
1170 		return (0);
1171 
1172 	case PLAT_ECC_CAPABILITY_MESSAGE:
1173 		/*
1174 		 * For indictment and capability messages, we've already gone
1175 		 * through the taskq, so we can call the mailbox routine
1176 		 * directly.  Capability messages expect a response from the
1177 		 * SC, so allocate a response buffer before falling through.
1178 		 */
1179 		msgp->ecc_resp.msg_len = (int)msg_size;
1180 		msgp->ecc_resp.msg_buf = (caddr_t)kmem_zalloc(msg_size,
1181 		    sleep_flag);
1182 		/* FALLTHRU */
1183 
1184 	case PLAT_ECC_INDICTMENT_MESSAGE:
1185 	case PLAT_ECC_INDICTMENT2_MESSAGE:
1186 		return ((*sg_ecc_mbox_func)(msgp));
1187 
1188 	case PLAT_ECC_DIMM_SID_MESSAGE:
1189 		msgp->ecc_resp.msg_len = sizeof (plat_dimm_sid_board_data_t);
1190 		msgp->ecc_resp.msg_buf = (caddr_t)kmem_zalloc(
1191 		    sizeof (plat_dimm_sid_board_data_t), sleep_flag);
1192 		return ((*sg_ecc_mbox_func)(msgp));
1193 
1194 	default:
1195 		ASSERT(0);
1196 		return (EINVAL);
1197 	}
1198 }
1199 
1200 /*
1201  * m is redundant on serengeti as the multiplier is always 4
1202  */
1203 /*ARGSUSED*/
1204 int
1205 plat_make_fru_cpuid(int sb, int m, int proc)
1206 {
1207 	return (MAKE_CPUID(sb, proc));
1208 }
1209 
1210 /*
1211  * board number for a given proc
1212  */
1213 int
1214 plat_make_fru_boardnum(int proc)
1215 {
1216 	return (SG_CPU_BD_PORTID_TO_BD_NUM(proc));
1217 }
1218 
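/*
 * Update the CPU and/or domain signature blocks in IOSRAM through the
 * dynamically resolved iosram_write()/iosram_read() pointers.  This is
 * installed as cpu_sgn_func from set_platform_defaults().
 */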
1219 static
1220 void
1221 cpu_sgn_update(ushort_t sig, uchar_t state, uchar_t sub_state, int cpuid)
1222 {
1223 	uint32_t signature = CPU_SIG_BLD(sig, state, sub_state);
1224 	sig_state_t current_sgn;
1225 	int i;
1226 
1227 	if (iosram_write_ptr == NULL) {
1228 		/*
1229 		 * If the IOSRAM write pointer isn't set, we won't be able
1230 		 * to write signatures to ANYTHING, so we may as well just
1231 		 * write out an error message (if desired) and exit this
1232 		 * routine now...
1233 		 */
1234 		DCMNERR(CE_WARN,
1235 		    "cpu_sgn_update: iosram_write() not found;"
1236 		    " cannot write signature 0x%x for CPU(s) or domain\n",
1237 		    signature);
1238 		return;
1239 	}
1240 
1241 
1242 	/*
1243 	 * Differentiate a panic reboot from a non-panic reboot in the
1244 	 * setting of the substate of the signature.
1245 	 *
1246 	 * If the new substate is REBOOT and we're rebooting due to a panic,
1247 	 * then set the new substate to a special value indicating a panic
1248 	 * reboot, SIGSUBST_PANIC_REBOOT.
1249 	 *
1250 	 * A panic reboot is detected by a current (previous) domain signature
1251 	 * state of SIGST_EXIT, and a new signature substate of SIGSUBST_REBOOT.
1252 	 * The domain signature state SIGST_EXIT is used as the panic flow
1253 	 * progresses.
1254 	 *
1255 	 * At the end of the panic flow, the reboot occurs, but we should note
1256 	 * that it was involuntary, something that may be quite useful to know
1257 	 * at OBP level.
1258 	 */
1259 	if (sub_state == SIGSUBST_REBOOT) {
1260 		if (iosram_read_ptr == NULL) {
1261 			DCMNERR(CE_WARN,
1262 			    "cpu_sgn_update: iosram_read() not found;"
1263 			    " could not check current domain signature\n");
1264 		} else {
1265 			(void) (*iosram_read_ptr)(SBBC_SIGBLCK_KEY,
1266 				SG_SGNBLK_DOMAINSIG_OFFSET,
1267 				(char *)&current_sgn, sizeof (current_sgn));
1268 			if (current_sgn.state_t.state == SIGST_EXIT)
1269 				signature = CPU_SIG_BLD(sig, state,
1270 					SIGSUBST_PANIC_REBOOT);
1271 		}
1272 	}
1273 
1274 	/*
1275 	 * cpuid == -1 indicates that the operation applies to all cpus.
1276 	 */
1277 	if (cpuid >= 0) {
1278 		(void) (*iosram_write_ptr)(SBBC_SIGBLCK_KEY,
1279 			SG_SGNBLK_CPUSIG_OFFSET(cpuid), (char *)&signature,
1280 			sizeof (signature));
1281 	} else {
1282 		for (i = 0; i < NCPU; i++) {
1283 			if (cpu[i] == NULL || !(cpu[i]->cpu_flags &
1284 				(CPU_EXISTS|CPU_QUIESCED))) {
1285 				continue;
1286 			}
1287 			(void) (*iosram_write_ptr)(SBBC_SIGBLCK_KEY,
1288 				SG_SGNBLK_CPUSIG_OFFSET(i), (char *)&signature,
1289 				sizeof (signature));
1290 		}
1291 	}
1292 
1293 	if (state == SIGST_OFFLINE || state == SIGST_DETACHED) {
1294 		return;
1295 	}
1296 
1297 	(void) (*iosram_write_ptr)(SBBC_SIGBLCK_KEY,
1298 		SG_SGNBLK_DOMAINSIG_OFFSET, (char *)&signature,
1299 		sizeof (signature));
1300 }
1301 
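/*
 * No platform-specific startup actions are required on Serengeti.
 */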
1302 void
1303 startup_platform(void)
1304 {
1305 }
1306 
1307 /*
1308  * A routine to convert a number (represented as a string) to
1309  * the integer value it represents.
1310  */
1311 
1312 static int
1313 isdigit(int ch)
1314 {
1315 	return (ch >= '0' && ch <= '9');
1316 }
1317 
1318 #define	isspace(c)	((c) == ' ' || (c) == '\t' || (c) == '\n')
1319 
1320 static int
1321 strtoi(char *p, char **pos)
1322 {
1323 	int n;
1324 	int c, neg = 0;
1325 
1326 	if (!isdigit(c = *p)) {
1327 		while (isspace(c))
1328 			c = *++p;
1329 		switch (c) {
1330 			case '-':
1331 				neg++;
1332 				/* FALLTHROUGH */
1333 			case '+':
1334 			c = *++p;
1335 		}
1336 		if (!isdigit(c)) {
1337 			if (pos != NULL)
1338 				*pos = p;
1339 			return (0);
1340 		}
1341 	}
1342 	for (n = '0' - c; isdigit(c = *++p); ) {
1343 		n *= 10; /* two steps to avoid unnecessary overflow */
1344 		n += '0' - c; /* accum neg to avoid surprises at MAX */
1345 	}
1346 	if (pos != NULL)
1347 		*pos = p;
1348 	return (neg ? n : -n);
1349 }
1350 
1351 /*
1352  * Get the three parts of the Serengeti PROM version.
1353  * Used for feature readiness tests.
1354  *
1355  * Return 0 if version extracted successfully, -1 otherwise.
1356  */
1357 
1358 int
1359 sg_get_prom_version(int *sysp, int *intfp, int *bldp)
1360 {
1361 	int plen;
1362 	char vers[512];
1363 	static pnode_t node;
1364 	static char version[] = "version";
1365 	char *verp, *ep;
1366 
1367 	node = prom_finddevice("/openprom");
1368 	if (node == OBP_BADNODE)
1369 		return (-1);
1370 
1371 	plen = prom_getproplen(node, version);
1372 	if (plen <= 0 || plen >= sizeof (vers))
1373 		return (-1);
1374 	(void) prom_getprop(node, version, vers);
1375 	vers[plen] = '\0';
1376 
1377 	/* Make sure it's an OBP flashprom */
1378 	if (vers[0] != 'O' || vers[1] != 'B' || vers[2] != 'P') {
1379 		cmn_err(CE_WARN, "sg_get_prom_version: "
1380 		    "unknown <version> string in </openprom>\n");
1381 		return (-1);
1382 	}
1383 	verp = &vers[4];
1384 
1385 	*sysp = strtoi(verp, &ep);
1386 	if (ep == verp || *ep != '.')
1387 		return (-1);
1388 	verp = ep + 1;
1389 
1390 	*intfp = strtoi(verp, &ep);
1391 	if (ep == verp || *ep != '.')
1392 		return (-1);
1393 	verp = ep + 1;
1394 
1395 	*bldp = strtoi(verp, &ep);
1396 	if (ep == verp || (*ep != '\0' && !isspace(*ep)))
1397 		return (-1);
1398 	return (0);
1399 }
1400 
1401 /*
1402  * Return 0 if system board Dynamic Reconfiguration
1403  * is supported by the firmware, -1 otherwise.
1404  */
1405 int
1406 sg_prom_sb_dr_check(void)
1407 {
1408 	static int prom_res = 1;
1409 
1410 	if (prom_res == 1) {
1411 		int sys, intf, bld;
1412 		int rv;
1413 
1414 		rv = sg_get_prom_version(&sys, &intf, &bld);
1415 		if (rv == 0 && sys == 5 &&
1416 		    (intf >= 12 || (intf == 11 && bld >= 200))) {
1417 			prom_res = 0;
1418 		} else {
1419 			prom_res = -1;
1420 		}
1421 	}
1422 	return (prom_res);
1423 }
1424 
1425 /*
1426  * Return 0 if cPCI Dynamic Reconfiguration
1427  * is supported by the firmware, -1 otherwise.
1428  */
1429 int
1430 sg_prom_cpci_dr_check(void)
1431 {
1432 	/*
1433 	 * The version check is currently the same as for
1434 	 * system boards. Since the two DR sub-systems are
1435 	 * independent, this could change.
1436 	 */
1437 	return (sg_prom_sb_dr_check());
1438 }
1439 
1440 /*
1441  * KDI functions - used by the in-situ kernel debugger (kmdb) to perform
1442  * platform-specific operations.  These functions execute when the world is
1443  * stopped, and as such cannot make any blocking calls, hold locks, etc.
1444  * promif functions are a special case, and may be used.
1445  */
1446 
1447 /*
1448  * Our implementation of this KDI op updates the CPU signature in the system
1449  * controller.  Note that we set the signature to OBP_SIG, rather than DBG_SIG.
1450  * The Forth words we execute will, among other things, transform our OBP_SIG
1451  * into DBG_SIG.  They won't function properly if we try to use DBG_SIG.
1452  */
1453 static void
1454 sg_system_claim(void)
1455 {
1456 	prom_interpret("sigb-sig! my-sigb-sig!", OBP_SIG, OBP_SIG, 0, 0, 0);
1457 }
1458 
1459 static void
1460 sg_system_release(void)
1461 {
1462 	prom_interpret("sigb-sig! my-sigb-sig!", OS_SIG, OS_SIG, 0, 0, 0);
1463 }
1464 
1465 static void
1466 sg_console_claim(void)
1467 {
1468 	prom_serengeti_set_console_input(SGCN_OBP_STR);
1469 }
1470 
1471 static void
1472 sg_console_release(void)
1473 {
1474 	prom_serengeti_set_console_input(SGCN_CLNT_STR);
1475 }
1476 
1477 void
1478 plat_kdi_init(kdi_t *kdi)
1479 {
1480 	kdi->pkdi_system_claim = sg_system_claim;
1481 	kdi->pkdi_system_release = sg_system_release;
1482 	kdi->pkdi_console_claim = sg_console_claim;
1483 	kdi->pkdi_console_release = sg_console_release;
1484 }
1485