1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Copyright 2019 Joyent, Inc.
29  * Copyright 2023 Oxide Computer Company
30  */
31 
32 /*
33  * s1394_dev_disc.c
34  *    1394 Services Layer Device Discovery Routines
35  *    This file contains the bus reset thread code, bus manager routines and
36  *    various routines that are used to implement remote Config ROM reading.
37  *
38  *    FUTURE:
39  *    Rescan the bus if invalid nodes are seen.
 *    Investigate using a taskq for the phase2 config rom reads.
 *    If we are reading the entire bus info blk, we should attempt
 *    a block read and fall back to quad reads if this fails.
43  */
44 
45 #include <sys/conf.h>
46 #include <sys/sysmacros.h>
47 #include <sys/ddi.h>
48 #include <sys/sunddi.h>
49 #include <sys/cmn_err.h>
50 #include <sys/sunndi.h>
51 #include <sys/modctl.h>
52 #include <sys/ddi_impldefs.h>
53 #include <sys/types.h>
54 #include <sys/kmem.h>
55 #include <sys/kstat.h>
56 #include <sys/varargs.h>
57 
58 #include <sys/1394/t1394.h>
59 #include <sys/1394/s1394.h>
60 #include <sys/1394/h1394.h>
61 #include <sys/1394/ieee1394.h>
62 #include <sys/1394/ieee1212.h>
63 
64 /* hcmd_ret_t */
65 typedef enum {
66 	S1394_HCMD_INVALID,
67 	S1394_HCMD_NODE_DONE,
68 	S1394_HCMD_NODE_EXPECT_MORE,
69 	S1394_HCMD_LOCK_FAILED
70 } hcmd_ret_t;
71 
72 #define	QUAD_TO_CFGROM_ADDR(b, n, q, addr) {			\
73 	uint64_t bl = (b);					\
74 	uint64_t nl = (n);					\
75 	addr = ((bl) << IEEE1394_ADDR_BUS_ID_SHIFT) |		\
76 		((nl) << IEEE1394_ADDR_PHY_ID_SHIFT);		\
77 	addr += IEEE1394_CONFIG_ROM_ADDR + ((q) << 2);		\
78 }
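
/*
 * For illustration only: assuming the usual CSR constants from ieee1394.h
 * (IEEE1394_LOCAL_BUS == 0x3FF, bus/phy ID shifts of 54/48 and
 * IEEE1394_CONFIG_ROM_ADDR == 0xFFFFF0000400 -- values assumed here, not
 * restated from this file), QUAD_TO_CFGROM_ADDR() for node 2, quadlet 3
 * works out to:
 *
 *	addr  = (0x3FFULL << 54) | (2ULL << 48);	0xFFC2000000000000
 *	addr += 0xFFFFF0000400 + (3 << 2);		0xFFC2FFFFF000040C
 *
 * i.e. the 16-bit node ID lands in the top 16 bits of the 1394 address and
 * the quadlet index becomes a byte offset (q << 2) from the config ROM base.
 */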
79 
80 #define	CFGROM_READ_PAUSE(d)						\
81 	((s1394_cfgrom_read_delay_ms == 0) ? (void) 0 :			\
82 	delay(drv_usectohz((d) * 1000)))
83 
84 #define	BUMP_CFGROM_READ_DELAY(n)					\
85 	(n)->cfgrom_read_delay += s1394_cfgrom_read_delay_incr
86 
87 #define	CFGROM_GET_READ_DELAY(n, d)					\
88 	((d) = (n)->cfgrom_read_delay)
89 
90 #define	SETUP_QUAD_READ(n, reset_fails, quadlet, cnt)			\
91 {									\
92 	int i = (reset_fails);						\
93 	if (i != 0) {							\
94 		(n)->cfgrom_read_fails = 0;				\
95 		(n)->cfgrom_read_delay = (uchar_t)s1394_cfgrom_read_delay_ms; \
96 	}								\
97 	(n)->cfgrom_quad_to_read = (quadlet);				\
98 	(n)->cfgrom_quad_read_cnt = (cnt);				\
99 }
100 
101 static void s1394_wait_for_events(s1394_hal_t *hal, int firsttime);
102 
103 static int s1394_wait_for_cfgrom_callbacks(s1394_hal_t *hal, uint_t wait_gen,
104     hcmd_ret_t(*handle_cmd_fn)(s1394_hal_t *hal, cmd1394_cmd_t *cmd));
105 
106 static void s1394_flush_cmplq(s1394_hal_t *hal);
107 
108 static void s1394_br_thread_exit(s1394_hal_t *hal);
109 
110 static void s1394_target_bus_reset_notifies(s1394_hal_t *hal,
111     t1394_localinfo_t *localinfo);
112 
113 static int s1394_alloc_cfgrom(s1394_hal_t *hal, s1394_node_t *node,
114     s1394_status_t *status);
115 
116 static int s1394_cfgrom_scan_phase1(s1394_hal_t *hal);
117 
118 static hcmd_ret_t s1394_br_thread_handle_cmd_phase1(s1394_hal_t *hal,
119     cmd1394_cmd_t *cmd);
120 
121 static int s1394_cfgrom_scan_phase2(s1394_hal_t *hal);
122 
123 static hcmd_ret_t s1394_br_thread_handle_cmd_phase2(s1394_hal_t *hal,
124     cmd1394_cmd_t *cmd);
125 
126 static int s1394_read_config_quadlet(s1394_hal_t *hal, cmd1394_cmd_t *cmd,
127     s1394_status_t *status);
128 
129 static void s1394_cfgrom_read_callback(cmd1394_cmd_t *cmd);
130 
131 static void s1394_get_quad_info(cmd1394_cmd_t *cmd, uint32_t *node_num,
132     uint32_t *quadlet, uint32_t *data);
133 
134 static int s1394_match_GUID(s1394_hal_t *hal, s1394_node_t *nnode);
135 
136 static int s1394_match_all_GUIDs(s1394_hal_t *hal);
137 
138 static void s1394_become_bus_mgr(void *arg);
139 
140 static void s1394_become_bus_mgr_callback(cmd1394_cmd_t *cmd);
141 
142 static int s1394_bus_mgr_processing(s1394_hal_t *hal);
143 
144 static int s1394_do_bus_mgr_processing(s1394_hal_t *hal);
145 
146 static void s1394_bus_mgr_timers_stop(s1394_hal_t *hal,
147     timeout_id_t *bus_mgr_query_tid, timeout_id_t *bus_mgr_tid);
148 
149 static void s1394_bus_mgr_timers_start(s1394_hal_t *hal,
150     timeout_id_t *bus_mgr_query_tid, timeout_id_t *bus_mgr_tid);
151 
152 static int s1394_cycle_master_capable(s1394_hal_t *hal);
153 
154 static int s1394_do_phy_config_pkt(s1394_hal_t *hal, int new_root,
155     int new_gap_cnt, uint32_t IRM_flags);
156 
157 static void s1394_phy_config_callback(cmd1394_cmd_t *cmd);
158 
159 static int s1394_calc_next_quad(s1394_hal_t *hal, s1394_node_t *node,
160     uint32_t quadlet, uint32_t *nextquadp);
161 
162 static int s1394_cfgrom_read_retry_cnt = 3;	/* 1 + 3 retries */
163 static int s1394_cfgrom_read_delay_ms = 20;	/* start with 20ms */
164 static int s1394_cfgrom_read_delay_incr = 10;	/* 10ms increments */
165 static int s1394_enable_crc_validation = 0;
166 static int s1394_turn_off_dir_stack = 0;
167 static int s1394_crcsz_is_cfgsz = 0;
168 static int s1394_enable_rio_pass1_workarounds = 0;
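
/*
 * A note on how the pacing tunables above interact (a sketch of the default
 * behavior, derived from the macros and read paths below): each node's
 * cfgrom_read_delay starts at s1394_cfgrom_read_delay_ms, so back-to-back
 * quadlet reads on a healthy node are paced ~20ms apart; every failed read
 * bumps the delay by s1394_cfgrom_read_delay_incr, so retries on a flaky
 * node wait ~30ms, then ~40ms, until s1394_cfgrom_read_retry_cnt failures
 * have accumulated and the node's config ROM is given up on. Setting
 * s1394_cfgrom_read_delay_ms to 0 disables the pause entirely (see
 * CFGROM_READ_PAUSE()).
 */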
169 
170 /*
171  * s1394_br_thread()
172  *    is the bus reset thread. Its sole purpose is to read/reread config roms
173  *    as appropriate and do bus reset time things (bus manager processing,
174  *    isoch resource reallocation etc.).
175  */
176 void
177 s1394_br_thread(s1394_hal_t *hal)
178 {
179 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
180 
181 	/* Initialize the Bus Mgr timers */
182 	hal->bus_mgr_timeout_id = 0;
183 	hal->bus_mgr_query_timeout_id = 0;
184 
	/* Initialize the completion Q */
186 	mutex_enter(&hal->br_cmplq_mutex);
187 	hal->br_cmplq_head = hal->br_cmplq_tail = NULL;
188 	mutex_exit(&hal->br_cmplq_mutex);
189 
190 	s1394_wait_for_events(hal, 1);
191 
192 	for (;;) {
193 		ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
194 
195 		s1394_wait_for_events(hal, 0);
196 
197 		ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
198 
199 		/* stop bus manager timeouts, if needed */
200 		s1394_bus_mgr_timers_stop(hal, &hal->bus_mgr_query_timeout_id,
201 		    &hal->bus_mgr_timeout_id);
202 
203 		s1394_flush_cmplq(hal);
204 
205 		/* start timers for checking bus manager, if needed */
206 		s1394_bus_mgr_timers_start(hal, &hal->bus_mgr_query_timeout_id,
207 		    &hal->bus_mgr_timeout_id);
208 
209 		/* Try to reallocate all isoch resources */
210 		s1394_isoch_rsrc_realloc(hal);
211 
212 		if (s1394_cfgrom_scan_phase1(hal) != DDI_SUCCESS) {
213 			continue;
214 		}
215 
216 		if (s1394_bus_mgr_processing(hal) != DDI_SUCCESS) {
217 			continue;
218 		}
219 
220 		ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
221 
222 		if (s1394_cfgrom_scan_phase2(hal) != DDI_SUCCESS) {
223 			continue;
224 		}
225 
226 		ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
227 	}
228 }
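
/*
 * For reference, a minimal sketch (not the actual interrupt-side code) of
 * how this thread gets woken: the bus reset/self-id processing path sets an
 * event bit and signals br_thread_cv under br_thread_mutex, which is exactly
 * what s1394_wait_for_events() below sleeps on.
 *
 *	mutex_enter(&hal->br_thread_mutex);
 *	hal->br_thread_ev_type |= BR_THR_CFGROM_SCAN;
 *	cv_signal(&hal->br_thread_cv);
 *	mutex_exit(&hal->br_thread_mutex);
 */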
229 
230 /*
231  * s1394_wait_for_events()
232  *    blocks waiting for a cv_signal on the bus reset condition variable.
233  *    Used by the bus reset thread for synchronizing with the bus reset/
 *    self id interrupt callback from the hal. Does CPR initialization the
 *    first time it is called. If the services layer sees a valid self id
 *    buffer, it builds the topology tree and signals the bus reset thread
 *    to read the config roms as appropriate (indicated by BR_THR_CFGROM_SCAN).
 *    If the services layer wishes to kill the bus reset thread, it does so
 *    by signaling a BR_THR_GO_AWAY event.
240  */
241 static void
242 s1394_wait_for_events(s1394_hal_t *hal, int firsttime)
243 {
244 	uint_t event;
245 
246 	ASSERT(MUTEX_NOT_HELD(&hal->br_thread_mutex));
247 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
248 
249 	if (firsttime)
250 		CALLB_CPR_INIT(&hal->hal_cprinfo, &hal->br_thread_mutex,
251 		    callb_generic_cpr, "s1394_br_thread");
252 
253 	/* Check and wait for a BUS RESET */
254 	mutex_enter(&hal->br_thread_mutex);
255 	while ((event = hal->br_thread_ev_type) == 0) {
256 		CALLB_CPR_SAFE_BEGIN(&hal->hal_cprinfo);
257 		cv_wait(&hal->br_thread_cv, &hal->br_thread_mutex);
258 		CALLB_CPR_SAFE_END(&hal->hal_cprinfo, &hal->br_thread_mutex);
259 	}
260 
261 	if (event & BR_THR_GO_AWAY) {
262 		s1394_br_thread_exit(hal);
263 		/*NOTREACHED*/
264 		return;
265 	}
266 
267 	if (firsttime) {
268 		mutex_exit(&hal->br_thread_mutex);
269 		return;
270 	}
271 
272 	mutex_enter(&hal->topology_tree_mutex);
273 	hal->br_cfgrom_read_gen = hal->generation_count;
274 
275 	hal->br_thread_ev_type &= ~BR_THR_CFGROM_SCAN;
276 	mutex_exit(&hal->topology_tree_mutex);
277 	mutex_exit(&hal->br_thread_mutex);
278 }
279 
280 /*
281  * s1394_wait_for_cfgrom_callbacks()
282  *    Waits for completed config rom reads. Takes each completion off the
283  *    completion queue and passes it to the "completion handler" function
284  *    that was passed in as an argument. Further processing of the completion
285  *    queue depends on the return status of the completion handler. If there
286  *    is a bus reset while waiting for completions or if the services layer
287  *    signals BR_THR_GO_AWAY, quits waiting for completions and returns
288  *    non-zero. Also returns non-zero if completion handler returns
289  *    S1394_HCMD_LOCK_FAILED.  Returns 0 if config roms for all nodes have
290  *    been dealt with.
291  */
292 static int
293 s1394_wait_for_cfgrom_callbacks(s1394_hal_t *hal, uint_t wait_gen,
294     hcmd_ret_t(*handle_cmd_fn)(s1394_hal_t *hal, cmd1394_cmd_t *cmd))
295 {
296 	cmd1394_cmd_t *cmd;
297 	s1394_cmd_priv_t *s_priv;
298 	int ret, done = 0;
299 	hcmd_ret_t cmdret;
300 
301 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
302 
303 	ret = DDI_SUCCESS;
304 
305 	while (!done) {
306 		mutex_enter(&hal->br_cmplq_mutex);
307 		mutex_enter(&hal->topology_tree_mutex);
308 		while (wait_gen == hal->generation_count &&
309 		    (hal->br_thread_ev_type & BR_THR_GO_AWAY) == 0 &&
310 		    hal->br_cmplq_head == NULL) {
311 			mutex_exit(&hal->topology_tree_mutex);
312 			cv_wait(&hal->br_cmplq_cv, &hal->br_cmplq_mutex);
313 			mutex_enter(&hal->topology_tree_mutex);
314 		}
315 		ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
316 		if (wait_gen != hal->generation_count ||
317 		    (hal->br_thread_ev_type & BR_THR_GO_AWAY) != 0) {
318 			mutex_exit(&hal->topology_tree_mutex);
319 			mutex_exit(&hal->br_cmplq_mutex);
320 			s1394_flush_cmplq(hal);
321 			return (DDI_FAILURE);
322 		}
323 		mutex_exit(&hal->topology_tree_mutex);
324 
325 		if ((cmd = hal->br_cmplq_head) != NULL) {
326 			s_priv = S1394_GET_CMD_PRIV(cmd);
327 
328 			hal->br_cmplq_head = s_priv->cmd_priv_next;
329 		}
330 		if (cmd == hal->br_cmplq_tail)
331 			hal->br_cmplq_tail = NULL;
332 		mutex_exit(&hal->br_cmplq_mutex);
333 
334 		if (cmd != NULL) {
335 			if (cmd->bus_generation != wait_gen) {
336 				(void) s1394_free_cmd(hal, &cmd);
337 				continue;
338 			}
339 			cmdret = (*handle_cmd_fn)(hal, cmd);
340 			ASSERT(cmdret != S1394_HCMD_INVALID);
341 			if (cmdret == S1394_HCMD_LOCK_FAILED) {
342 				/* flush completion queue */
343 				ret = DDI_FAILURE;
344 				s1394_flush_cmplq(hal);
345 				break;
346 			} else if (cmdret == S1394_HCMD_NODE_DONE) {
347 				if (--hal->cfgroms_being_read == 0) {
348 					/* All done */
349 					break;
350 				}
351 			} else {
352 				ASSERT(cmdret == S1394_HCMD_NODE_EXPECT_MORE);
353 				done = 0;
354 			}
355 		}
356 	}
357 
358 	return (ret);
359 }
360 
361 /*
362  * s1394_flush_cmplq()
363  *    Frees all cmds on the completion queue.
364  */
365 static void
366 s1394_flush_cmplq(s1394_hal_t *hal)
367 {
368 	s1394_cmd_priv_t *s_priv;
369 	cmd1394_cmd_t *cmd, *tcmd;
370 
371 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
372 
373 	cmd = NULL;
374 
375 	do {
376 		mutex_enter(&hal->br_cmplq_mutex);
377 		cmd = hal->br_cmplq_head;
378 		hal->br_cmplq_head = hal->br_cmplq_tail = NULL;
379 		mutex_exit(&hal->br_cmplq_mutex);
380 
381 		while (cmd != NULL) {
382 			s_priv = S1394_GET_CMD_PRIV(cmd);
383 
384 			tcmd = s_priv->cmd_priv_next;
385 			(void) s1394_free_cmd(hal, &cmd);
386 			cmd = tcmd;
387 		}
388 
389 		mutex_enter(&hal->br_cmplq_mutex);
390 		cmd = hal->br_cmplq_head;
391 		mutex_exit(&hal->br_cmplq_mutex);
392 
393 	} while (cmd != NULL);
394 }
395 
396 /*
397  * s1394_br_thread_exit()
398  *    Flushes the completion queue and calls thread_exit() (which effectively
399  *    kills the bus reset thread).
400  */
401 static void
402 s1394_br_thread_exit(s1394_hal_t *hal)
403 {
404 	ASSERT(MUTEX_HELD(&hal->br_thread_mutex));
405 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
406 	s1394_flush_cmplq(hal);
407 #ifndef	__lock_lint
408 	CALLB_CPR_EXIT(&hal->hal_cprinfo);
409 #endif
410 	hal->br_thread_ev_type &= ~BR_THR_GO_AWAY;
411 	thread_exit();
412 	/*NOTREACHED*/
413 }
414 
415 /*
416  * s1394_target_bus_reset_notifies()
417  *    tells the ndi event framework to invoke any callbacks registered for
418  *    "bus reset event".
419  */
420 static void
421 s1394_target_bus_reset_notifies(s1394_hal_t *hal, t1394_localinfo_t *localinfo)
422 {
423 	ddi_eventcookie_t cookie;
424 
425 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
426 
427 	if (ndi_event_retrieve_cookie(hal->hal_ndi_event_hdl, NULL,
428 	    DDI_DEVI_BUS_RESET_EVENT, &cookie, NDI_EVENT_NOPASS) ==
429 	    NDI_SUCCESS) {
430 		(void) ndi_event_run_callbacks(hal->hal_ndi_event_hdl, NULL,
431 		    cookie, localinfo);
432 	}
433 }
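
/*
 * For illustration only: a target driver that wants these notifications
 * would typically register through the standard DDI event interfaces
 * (handler and state names below are hypothetical); the bus-specific data
 * handed to the handler is the t1394_localinfo_t supplied above.
 *
 *	ddi_eventcookie_t br_cookie;
 *	ddi_callback_id_t br_cb_id;
 *
 *	if (ddi_get_eventcookie(dip, DDI_DEVI_BUS_RESET_EVENT,
 *	    &br_cookie) == DDI_SUCCESS) {
 *		(void) ddi_add_event_handler(dip, br_cookie,
 *		    xxx_bus_reset_handler, xxx_state, &br_cb_id);
 *	}
 */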
434 
435 /*
436  * s1394_alloc_cfgrom()
437  *    Allocates config rom for the node. Sets CFGROM_NEW_ALLOC bit in the
438  *    node cfgrom state. Drops topology_tree_mutex around the calls to
439  *    kmem_zalloc(). If re-locking fails, returns DDI_FAILURE, else returns
440  *    DDI_SUCCESS.
441  */
442 static int
443 s1394_alloc_cfgrom(s1394_hal_t *hal, s1394_node_t *node, s1394_status_t *status)
444 {
445 	uint32_t *cfgrom;
446 
447 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
448 
449 	*status = S1394_NOSTATUS;
450 
451 	/*
	 * if cfgrom is non-NULL, this has to be the generation-changed
	 * case (where we allocate cfgrom again to reread the cfgrom)
454 	 */
455 	ASSERT(node->cfgrom == NULL || (node->cfgrom != NULL &&
456 	    CFGROM_GEN_CHANGED(node) == B_TRUE));
457 
458 	/*
459 	 * if node matched, either cfgrom has to be NULL or link should be
460 	 * off in the last matched node or config rom generations changed.
461 	 */
462 	ASSERT(NODE_MATCHED(node) == B_FALSE || (NODE_MATCHED(node) == B_TRUE &&
463 	    (node->cfgrom == NULL || LINK_ACTIVE(node->old_node) == B_FALSE) ||
464 	    CFGROM_GEN_CHANGED(node) == B_TRUE));
465 
466 	s1394_unlock_tree(hal);
467 	cfgrom = (uint32_t *)kmem_zalloc(IEEE1394_CONFIG_ROM_SZ, KM_SLEEP);
468 	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
469 		kmem_free(cfgrom, IEEE1394_CONFIG_ROM_SZ);
470 		*status |= S1394_LOCK_FAILED;
471 		return (DDI_FAILURE);
472 	}
473 	node->cfgrom = cfgrom;
474 	node->cfgrom_size = IEEE1394_CONFIG_ROM_QUAD_SZ;
475 	SET_CFGROM_NEW_ALLOC(node);
476 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
477 	return (DDI_SUCCESS);
478 }
479 
480 /*
481  * s1394_free_cfgrom()
 *    Marks the config rom invalid and frees it up based on options.
483  */
484 void
485 s1394_free_cfgrom(s1394_hal_t *hal, s1394_node_t *node,
486     s1394_free_cfgrom_t options)
487 {
488 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
489 	ASSERT(node->cfgrom != NULL);
490 
491 	if (options == S1394_FREE_CFGROM_BOTH) {
492 		/*
493 		 * free in both old and new trees; will be called with
494 		 * new node.
495 		 */
496 		s1394_node_t *onode = node->old_node;
497 
498 		if (NODE_MATCHED(node) == B_TRUE && onode->cfgrom != NULL)
499 			ASSERT(onode->cfgrom == node->cfgrom);
500 
501 		if (onode != NULL && onode->cfgrom != NULL && onode->cfgrom !=
502 		    node->cfgrom)
503 			kmem_free(onode->cfgrom, IEEE1394_CONFIG_ROM_SZ);
504 
505 		kmem_free(node->cfgrom, IEEE1394_CONFIG_ROM_SZ);
506 		onode->cfgrom = NULL;
507 		node->cfgrom = NULL;
508 
509 		CLEAR_CFGROM_STATE(onode);
510 		CLEAR_CFGROM_STATE(node);
511 
512 	} else if (options == S1394_FREE_CFGROM_NEW) {
513 
514 		ASSERT(CFGROM_NEW_ALLOC(node) == B_TRUE);
515 		kmem_free(node->cfgrom, IEEE1394_CONFIG_ROM_SZ);
516 		CLEAR_CFGROM_NEW_ALLOC(node);
517 		node->cfgrom = NULL;
518 		CLEAR_CFGROM_STATE(node);
519 
520 	} else if (options == S1394_FREE_CFGROM_OLD) {
521 
522 		/* freeing in old tree */
523 		kmem_free(node->cfgrom, IEEE1394_CONFIG_ROM_SZ);
524 		node->cfgrom = NULL;
525 		CLEAR_CFGROM_STATE(node);
526 	}
527 }
528 
529 /*
530  * s1394_copy_cfgrom()
531  *    Copies config rom info from "from" node to "to" node. Clears
 *    CFGROM_NEW_ALLOC bit in cfgrom state in both nodes. (CFGROM_NEW_ALLOC
533  *    acts as a reference count. If set, only the node in the current tree
534  *    has a pointer to it; if clear, both the node in the current tree as
535  *    well as the corresponding node in the old tree point to the same memory).
536  */
537 void
538 s1394_copy_cfgrom(s1394_node_t *to, s1394_node_t *from)
539 {
540 	ASSERT(to->cfgrom == NULL);
541 
542 	to->cfgrom = from->cfgrom;
543 	to->cfgrom_state = from->cfgrom_state;
544 	to->cfgrom_valid_size = from->cfgrom_valid_size;
545 	to->cfgrom_size = from->cfgrom_size;
546 	to->node_state = from->node_state;
547 
548 	bcopy(from->dir_stack, to->dir_stack,
549 	    offsetof(s1394_node_t, cfgrom_quad_to_read) -
550 	    offsetof(s1394_node_t, dir_stack));
551 
552 	to->cfgrom_quad_to_read = from->cfgrom_quad_to_read;
553 
554 	CLEAR_CFGROM_NEW_ALLOC(to);
555 	CLEAR_CFGROM_NEW_ALLOC(from);
556 
557 	/*
558 	 * old link off, new link on => handled in s1394_cfgrom_scan_phase1
559 	 * old link on, new link off => handled in s1394_process_old_tree
560 	 */
561 	if (LINK_ACTIVE(from) == B_FALSE) {
562 		/*
		 * if the link was off last time around, there wouldn't
		 * have been a config rom allocated.
565 		 */
566 		ASSERT(from->cfgrom == NULL);
567 		return;
568 	} else {
569 		s1394_selfid_pkt_t *selfid_pkt = to->selfid_packet;
570 
571 		if (IEEE1394_SELFID_ISLINKON(selfid_pkt))
572 			SET_LINK_ACTIVE(to);
573 	}
574 }
575 
576 /*
577  * s1394_read_bus_info_blk()
578  *    Attempts to kick off reading IEEE1212_NODE_CAP_QUAD quad or quad 0.
 *    Increments cfgroms_being_read by 1. Returns DDI_SUCCESS if the command
 *    was issued, else sets status to the failure reason and returns
 *    DDI_FAILURE.
581  */
582 static int
583 s1394_read_bus_info_blk(s1394_hal_t *hal, s1394_node_t *node,
584     s1394_status_t *status)
585 {
586 	uint32_t quadlet;
587 	cmd1394_cmd_t *cmd;
588 	uchar_t node_num;
589 
590 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
591 	ASSERT(LINK_ACTIVE(node) == B_TRUE);
592 
593 	node_num = node->node_num;
594 
595 	/*
596 	 * drop the topology lock around command allocation. Return failure
	 * if either command allocation fails or we cannot reacquire the lock
598 	 */
599 	s1394_unlock_tree(hal);
600 	*status = S1394_NOSTATUS;
601 
602 	if (s1394_alloc_cmd(hal, 0, &cmd) != DDI_SUCCESS) {
603 		*status |= S1394_CMD_ALLOC_FAILED;
604 	}
605 	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
606 		*status |= S1394_LOCK_FAILED;
607 		/* free the cmd allocated above */
		if (((*status) & S1394_CMD_ALLOC_FAILED) == 0)
609 			(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
610 	}
611 	if (((*status) & (S1394_CMD_ALLOC_FAILED | S1394_LOCK_FAILED)) != 0) {
612 		return (DDI_FAILURE);
613 	}
614 
615 	/* allocate cfgrom if needed */
616 	if (node->cfgrom == NULL && s1394_alloc_cfgrom(hal, node, status) !=
617 	    DDI_SUCCESS) {
618 		ASSERT(((*status) & S1394_LOCK_FAILED) != 0);
619 		(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
620 		ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
621 		return (DDI_FAILURE);
622 	}
623 
624 	/*
625 	 * if this is a matched node, read quad 2 (node capabilities) to
626 	 * see if the generation count changed.
627 	 */
628 	quadlet = CFGROM_BIB_READ(node) ? IEEE1212_NODE_CAP_QUAD : 0;
629 
630 	/*
631 	 * read bus info block at 100Mbit. This will help us with the cases
632 	 * where LINK is slower than PHY; s1394 uses PHY speed till speed map
633 	 * is updated.
634 	 */
635 	cmd->completion_callback = s1394_cfgrom_read_callback;
636 	cmd->bus_generation = hal->generation_count;
637 	cmd->cmd_options = (CMD1394_CANCEL_ON_BUS_RESET |
638 	    CMD1394_OVERRIDE_ADDR | CMD1394_OVERRIDE_SPEED);
639 	cmd->cmd_speed = IEEE1394_S100;
640 	cmd->cmd_type = CMD1394_ASYNCH_RD_QUAD;
641 
642 	QUAD_TO_CFGROM_ADDR(IEEE1394_LOCAL_BUS, node_num,
643 	    quadlet, cmd->cmd_addr);
644 
645 	SETUP_QUAD_READ(node, 1, quadlet, 1);
646 	if (s1394_read_config_quadlet(hal, cmd, status) != DDI_SUCCESS) {
647 		/* free the command if it wasn't handed over to the HAL */
648 		if (((*status) & S1394_CMD_INFLIGHT) == 0) {
649 			(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
650 		}
651 		if (((*status) & S1394_LOCK_FAILED) != 0) {
652 			ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
653 		}
654 		return (DDI_FAILURE);
655 	}
656 
657 	hal->cfgroms_being_read++;
658 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
659 
660 	return (DDI_SUCCESS);
661 }
662 
663 /*
664  * s1394_read_rest_of_cfgrom()
665  *    Attempts to start reading node->cfgrom_quad_to_read quadlet. Increments
666  *    cfgroms_being_read by 1 and returns DDI_SUCCESS if command was issued,
667  *    else sets status to the failure reason and returns DDI_FAILURE.
668  */
669 int
670 s1394_read_rest_of_cfgrom(s1394_hal_t *hal, s1394_node_t *node,
671     s1394_status_t *status)
672 {
673 	cmd1394_cmd_t *cmd;
674 	uchar_t node_num = node->node_num;
675 
676 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
677 	ASSERT(LINK_ACTIVE(node) == B_TRUE);
678 
679 	/*
680 	 * drop the topology lock around command allocation. Return failure
	 * if either command allocation fails or we cannot reacquire the lock
682 	 */
683 	s1394_unlock_tree(hal);
684 	*status = S1394_NOSTATUS;
685 
686 	if (s1394_alloc_cmd(hal, 0, &cmd) != DDI_SUCCESS) {
687 		*status |= S1394_CMD_ALLOC_FAILED;
688 	}
689 	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
690 		*status |= S1394_LOCK_FAILED;
691 		/* free if we allocated a cmd above */
692 		if (((*status) & S1394_CMD_ALLOC_FAILED) == 0)
693 			(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
694 	}
695 	if (((*status) & (S1394_CMD_ALLOC_FAILED | S1394_LOCK_FAILED)) != 0) {
696 		return (DDI_FAILURE);
697 	}
698 
699 	cmd->completion_callback = s1394_cfgrom_read_callback;
700 	cmd->bus_generation = hal->generation_count;
701 	cmd->cmd_options = (CMD1394_CANCEL_ON_BUS_RESET |
702 	    CMD1394_OVERRIDE_ADDR);
703 	cmd->cmd_type = CMD1394_ASYNCH_RD_QUAD;
704 
705 	QUAD_TO_CFGROM_ADDR(IEEE1394_LOCAL_BUS, node_num,
706 	    node->cfgrom_quad_to_read, cmd->cmd_addr);
707 	SETUP_QUAD_READ(node, 1, node->cfgrom_quad_to_read, 1);
708 	if (s1394_read_config_quadlet(hal, cmd, status) != DDI_SUCCESS) {
709 		/* free the command if it wasn't handed over to the HAL */
710 		if (((*status) & S1394_CMD_INFLIGHT) == 0) {
711 			(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
712 		}
713 		if (((*status) & S1394_LOCK_FAILED) != 0) {
714 			ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
715 		}
716 		return (DDI_FAILURE);
717 	}
718 
719 	hal->cfgroms_being_read++;
720 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
721 
722 	return (DDI_SUCCESS);
723 }
724 
725 /*
726  * s1394_cfgrom_scan_phase1()
727  *    Attempts to read bus info blocks for nodes as needed. Returns DDI_FAILURE
728  *    if bus reset generations changed (as indicated by s1394_lock_tree()
729  *    return status) or if any of the callees return failure, else returns
730  *    DDI_SUCCESS.
731  */
732 static int
733 s1394_cfgrom_scan_phase1(s1394_hal_t *hal)
734 {
735 	uint32_t number_of_nodes;
736 	int ret;
737 	int node;
738 	int wait_in_gen;
739 	int wait_for_cbs;
740 	uint_t hal_node_num;
741 	uint_t hal_node_num_old;
742 	s1394_node_t *nnode, *onode;
743 	s1394_selfid_pkt_t *selfid_pkt;
744 	s1394_status_t status;
745 
746 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
747 
748 	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
749 		return (DDI_FAILURE);
750 	}
751 	wait_for_cbs = 0;
752 	number_of_nodes = hal->number_of_nodes;
753 	hal->cfgroms_being_read = 0;
754 	hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
755 	hal_node_num_old = IEEE1394_NODE_NUM(hal->old_node_id);
756 	s1394_unlock_tree(hal);
757 
758 	ret = DDI_SUCCESS;
759 
760 	/* Send requests for all new node config ROM 0 */
761 	for (node = 0; node < number_of_nodes; node++) {
762 
763 		status = S1394_UNKNOWN;
764 
765 		if (s1394_lock_tree(hal) != DDI_SUCCESS) {
766 			status = S1394_LOCK_FAILED;
767 			break;
768 		}
769 
770 		nnode = &hal->topology_tree[node];
771 		onode = nnode->old_node;
772 		/* if node matched, onode should be non NULL */
773 		ASSERT(NODE_MATCHED(nnode) == B_FALSE || (NODE_MATCHED(nnode) ==
774 		    B_TRUE && onode != NULL));
775 
776 		/*
777 		 * Read bus info block if it is a brand new node (MATCHED is 0)
		 * or if matched but link was off in previous generations
		 * or if matched but had invalid cfgrom in last generation
780 		 * or if matched but config rom generation > 1 (this is to
781 		 * check if config rom generation changed between bus resets).
782 		 */
783 		if ((node != hal_node_num) &&
784 		    ((NODE_MATCHED(nnode) == B_FALSE) ||
785 		    (NODE_MATCHED(nnode) == B_TRUE && LINK_ACTIVE(onode) ==
786 		    B_FALSE) || (NODE_MATCHED(nnode) == B_TRUE &&
787 		    (onode->cfgrom == NULL || CFGROM_VALID(onode) ==
788 		    B_FALSE)) || (NODE_MATCHED(nnode) == B_TRUE &&
789 		    nnode->cfgrom != NULL && CONFIG_ROM_GEN(nnode->cfgrom) >
790 		    1))) {
791 
792 			SET_NODE_VISITED(nnode);
793 			selfid_pkt = nnode->selfid_packet;
794 			if (IEEE1394_SELFID_ISLINKON(selfid_pkt)) {
795 
796 				SET_LINK_ACTIVE(nnode);
797 
798 				status = S1394_UNKNOWN;
799 
800 				if (s1394_read_bus_info_blk(hal, nnode,
801 				    &status) != DDI_SUCCESS) {
802 					if ((status & S1394_LOCK_FAILED) != 0)
803 						break;
804 				} else {
805 					wait_for_cbs++;
806 					wait_in_gen = hal->br_cfgrom_read_gen;
807 				}
808 			} else {
809 				/*
810 				 * Special case: if link was active last
811 				 * time around, this should be treated as
812 				 * node going away.
813 				 */
814 				CLEAR_LINK_ACTIVE(nnode);
815 				if (NODE_MATCHED(nnode) == B_TRUE &&
816 				    LINK_ACTIVE(onode) == B_TRUE) {
817 					CLEAR_CFGROM_STATE(nnode);
818 				}
819 			}
820 		} else {
821 			if (node == hal_node_num) {
822 				onode = &hal->old_tree[hal_node_num_old];
823 				/* Set up the local matched nodes */
824 				if (onode) {
825 					nnode->old_node = onode;
826 					SET_NODE_MATCHED(nnode);
827 					SET_NODE_MATCHED(onode);
828 					s1394_copy_cfgrom(nnode, onode);
829 				}
830 			}
831 		}
832 		s1394_unlock_tree(hal);
833 	}
834 
835 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
836 
837 	if ((status & S1394_LOCK_FAILED) != 0) {
838 		return (DDI_FAILURE);
839 	}
840 
841 	/*
842 	 * If we started any reads, wait for completion callbacks
843 	 */
844 	if (wait_for_cbs != 0) {
845 		ret = s1394_wait_for_cfgrom_callbacks(hal, wait_in_gen,
846 		    s1394_br_thread_handle_cmd_phase1);
847 	}
848 
849 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
850 
851 	return (ret);
852 }
853 
854 /*
855  * s1394_br_thread_handle_cmd_phase1()
856  *    Process the cmd completion for phase 1 config rom reads. If we
857  *    successfully read IEEE1212_NODE_CAP_QUAD quadlet and config rom gen
858  *    did not change, move targets hanging off the old node to the current
859  *    node. If config rom generations change, alloc new config rom and start
860  *    re-reading the new config rom. If all of bus info block is read (as
 *    required), mark the node as CFGROM_BIB_READ. If a config rom read fails,
 *    retry unless there have been too many failures. Topology tree mutex is
 *    dropped and
863  *    reacquired in this routine. If reacquiring fails, returns
864  *    S1394_HCMD_LOCK_FAILED. If the entire bus info block is read, returns
865  *    S1394_HCMD_NODE_DONE, else returns S1394_HCMD_NODE_EXPECT_MORE (to
866  *    indicate not done with the node yet).
867  *
868  *    If we cannot read any of the quadlets in the bus info block, cfgrom
869  *    is marked invalid in this generation (a side effect of calling
870  *    s1394_free_cfgrom()). We free cfgrom in this routine only if the failure
871  *    is not due to bus generations changing.
872  */
873 static hcmd_ret_t
874 s1394_br_thread_handle_cmd_phase1(s1394_hal_t *hal, cmd1394_cmd_t *cmd)
875 {
876 	s1394_target_t *t;
877 	s1394_node_t *node, *onode;
878 	uint32_t node_num, quadlet, data;
879 	int freecmd, done, locked;
880 	hcmd_ret_t cmdret;
881 	uchar_t readdelay;
882 	s1394_status_t status;
883 
884 	s1394_get_quad_info(cmd, &node_num, &quadlet, &data);
885 	ASSERT(quadlet == 0 || quadlet < IEEE1394_BIB_QUAD_SZ);
886 
887 	cmdret = S1394_HCMD_NODE_EXPECT_MORE;
888 
889 	locked = 1;
890 	freecmd = 1;
891 
892 	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
893 		locked = 0;
894 		goto bail;
895 	}
896 
897 	node = &hal->topology_tree[node_num];
898 
899 	if (cmd->cmd_result == CMD1394_CMDSUCCESS) {
900 
901 		int reread = 0;
902 
903 		done = 0;
904 
905 		if (quadlet == IEEE1212_NODE_CAP_QUAD &&
906 		    CFGROM_BIB_READ(node)) {
907 
908 			int cur_gen = ((data & IEEE1394_BIB_GEN_MASK) >>
909 			    IEEE1394_BIB_GEN_SHIFT);
910 
911 			/*
912 			 * node->old_node can be NULL if this is a new node &
913 			 * we are doing a rescan
914 			 */
915 			onode = node->old_node;
916 			if (CONFIG_ROM_GEN(node->cfgrom) == cur_gen) {
917 
918 				if (CFGROM_PARSED(node) == B_TRUE) {
919 					rw_enter(&hal->target_list_rwlock,
920 					    RW_WRITER);
921 					/* Update the target list, if any */
922 					if (onode != NULL &&
923 					    (t = onode->target_list) != NULL) {
924 						node->target_list = t;
925 						while (t != NULL) {
926 							t->on_node = node;
927 							t = t->target_sibling;
928 						}
929 					}
930 					rw_exit(&hal->target_list_rwlock);
931 				}
932 				SET_NODE_MATCHED(node);
933 				if (onode)
934 					SET_NODE_MATCHED(onode);
935 				node->cfgrom_quad_to_read =
936 				    IEEE1394_BIB_QUAD_SZ;
937 				done++;
938 			} else {
939 
940 				SET_CFGROM_GEN_CHANGED(node);
941 				if (onode != NULL)
942 					SET_CFGROM_GEN_CHANGED(onode);
943 				/*
944 				 * Reset BIB_READ flag and start reading entire
945 				 * config rom.
946 				 */
947 				CLEAR_CFGROM_BIB_READ(node);
948 				reread = 1;
949 
950 				/*
951 				 * if generations changed, allocate cfgrom for
952 				 * the new generation. s1394_match_GUID() will
953 				 * free up the cfgrom from the old generation.
954 				 */
955 				if (s1394_alloc_cfgrom(hal, node, &status) !=
956 				    DDI_SUCCESS) {
957 					ASSERT((status & S1394_LOCK_FAILED) !=
958 					    0);
959 					ASSERT(MUTEX_NOT_HELD(&hal->
960 					    topology_tree_mutex));
961 					locked = 0;
962 					/* we failed to relock the tree */
963 					goto bail;
964 				}
965 			}
966 		}
967 
968 		/*
969 		 * we end up here if we don't have bus_info_blk for this
970 		 * node or if config rom generation changed.
971 		 */
972 
973 		/*
974 		 * Pass1 Rio bug workaround. Due to this bug, if we read
975 		 * past quadlet 5 of the config rom, the PCI bus gets wedged.
976 		 * Avoid the hang by not reading past quadlet 5.
977 		 * We identify a remote Rio by the node vendor id part of
978 		 * quad 3 (which is == SUNW == S1394_SUNW_OUI (0x80020)).
979 		 */
980 		if (s1394_enable_rio_pass1_workarounds != 0) {
981 			if ((quadlet == 3) && ((data >> 8) == S1394_SUNW_OUI)) {
982 				node->cfgrom_size = IEEE1394_BIB_QUAD_SZ;
983 				node->cfgrom_valid_size = IEEE1394_BIB_QUAD_SZ;
984 			}
985 		}
986 
987 		if (!done) {
988 
989 			if (reread)
990 				quadlet = 0;
991 			else
992 				node->cfgrom[quadlet++] = data;
993 
994 			/* if we don't have the entire bus_info_blk... */
995 			if (quadlet < IEEE1394_BIB_QUAD_SZ) {
996 
997 				CFGROM_GET_READ_DELAY(node, readdelay);
998 				SETUP_QUAD_READ(node, 1, quadlet, 1);
999 				s1394_unlock_tree(hal);
1000 				CFGROM_READ_PAUSE(readdelay);
1001 				/* get next quadlet */
1002 				if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1003 					locked = 0;
1004 				} else if (s1394_read_config_quadlet(hal, cmd,
1005 				    &status) != DDI_SUCCESS) {
1006 					/*
1007 					 * Failed to get going. If command was
1008 					 * successfully handed over to the HAL,
1009 					 * don't free it (it will get freed
1010 					 * later in the callback).
1011 					 */
1012 					if ((status & S1394_CMD_INFLIGHT) !=
1013 					    0) {
1014 						freecmd = 0;
1015 					}
1016 					if ((status & S1394_LOCK_FAILED) != 0) {
1017 						locked = 0;
1018 					} else {
1019 						if (CFGROM_NEW_ALLOC(node) ==
1020 						    B_TRUE) {
1021 							s1394_free_cfgrom(hal,
1022 							    node,
1023 							S1394_FREE_CFGROM_NEW);
1024 						} else {
1025 							CLEAR_CFGROM_STATE(
1026 							    node);
1027 						}
1028 					}
1029 					done++;
1030 				} else {
1031 					freecmd = 0;
1032 				}
1033 			} else {
1034 				/* got all of bus_info_blk */
1035 				SET_CFGROM_BIB_READ(node);
1036 				if (node->cfgrom_size == IEEE1394_BIB_QUAD_SZ)
					SET_CFGROM_ALL_READ(node);
1038 				node->cfgrom_quad_to_read = quadlet;
1039 				done++;
1040 			}
1041 		}
1042 	} else {
1043 		done = 1;
1044 		node->cfgrom_read_fails++;
1045 		BUMP_CFGROM_READ_DELAY(node);
1046 
1047 		/* retry if not too many failures */
1048 		if (node->cfgrom_read_fails < s1394_cfgrom_read_retry_cnt) {
1049 			CFGROM_GET_READ_DELAY(node, readdelay);
1050 			SETUP_QUAD_READ(node, 0, quadlet, 1);
1051 			s1394_unlock_tree(hal);
1052 			CFGROM_READ_PAUSE(readdelay);
1053 			if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1054 				locked = 0;
1055 			} else if (s1394_read_config_quadlet(hal, cmd,
1056 			    &status) != DDI_SUCCESS) {
1057 				/*
1058 				 * Failed to get going. If command was
1059 				 * successfully handed over to the HAL,
1060 				 * don't free it (it will get freed
1061 				 * later in the callback).
1062 				 */
1063 				if ((status & S1394_CMD_INFLIGHT) != 0) {
1064 					freecmd = 0;
1065 				}
1066 				if ((status & S1394_LOCK_FAILED) != 0) {
1067 					locked = 0;
1068 				} else {
1069 					if (CFGROM_NEW_ALLOC(node) == B_TRUE) {
1070 						s1394_free_cfgrom(hal, node,
1071 						    S1394_FREE_CFGROM_NEW);
1072 					} else {
1073 						CLEAR_CFGROM_STATE(node);
1074 					}
1075 				}
1076 			} else {
1077 				done = 0;
1078 				freecmd = 0;
1079 			}
1080 		} else {
1081 			if (CFGROM_NEW_ALLOC(node) == B_TRUE) {
1082 				s1394_free_cfgrom(hal, node,
1083 				    S1394_FREE_CFGROM_NEW);
1084 			} else {
1085 				CLEAR_CFGROM_STATE(node);
1086 			}
1087 		}
1088 	}
1089 bail:
1090 	if (freecmd) {
1091 		(void) s1394_free_cmd(hal, &cmd);
1092 	}
1093 
1094 	if (done) {
1095 		cmdret = S1394_HCMD_NODE_DONE;
1096 	}
1097 
1098 	/* if we are bailing out because locking failed, locked == 0 */
1099 	if (locked == 0)
1100 		cmdret = S1394_HCMD_LOCK_FAILED;
1101 	else
1102 		s1394_unlock_tree(hal);
1103 
1104 	return (cmdret);
1105 }
1106 
1107 /*
1108  * s1394_cfgrom_scan_phase2()
1109  *    Handles phase 2 of bus reset processing. Matches GUIDs between old
1110  *    and new topology trees to identify which node moved where. Processes
1111  *    the old topology tree (involves offlining any nodes that got unplugged
1112  *    between the last generation and the current generation). Updates speed
 *    map, sets up physical AR request filter and does isoch resource
1114  *    realloc failure notification and bus reset notifications. Then resends
1115  *    any commands that were issued by targets while the reset was being
1116  *    processed. Finally, the current topology tree is processed. This involves
1117  *    reading config rom past the bus info block for new nodes and parsing
1118  *    the config rom, creating a devinfo for each unit directory found in the
1119  *    config rom.
1120  *    Returns DDI_FAILURE if there was bus reset during any of the function
 *    calls (as indicated by lock failures) or if any of the routine's callees
1122  *    return failure, else returns DDI_SUCCESS.
1123  */
1124 static int
1125 s1394_cfgrom_scan_phase2(s1394_hal_t *hal)
1126 {
1127 	int ret;
1128 	uint_t wait_gen;
1129 	int wait_for_cbs = 0;
1130 	t1394_localinfo_t localinfo;
1131 
1132 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
1133 
1134 	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1135 		return (DDI_FAILURE);
1136 	}
1137 
1138 	if (s1394_match_all_GUIDs(hal) == DDI_SUCCESS) {
1139 		s1394_unlock_tree(hal);
1140 	}
1141 
1142 	if (s1394_process_old_tree(hal) != DDI_SUCCESS) {
1143 		ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
1144 		return (DDI_FAILURE);
1145 	}
1146 
1147 	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1148 		return (DDI_FAILURE);
1149 	}
1150 
1151 	s1394_update_speed_map_link_speeds(hal);
1152 	s1394_unlock_tree(hal);
1153 
1154 	/* Setup physical AR request filters */
1155 	s1394_physical_arreq_setup_all(hal);
1156 
1157 	/* Notify targets of isoch resource realloc failures */
1158 	s1394_isoch_rsrc_realloc_notify(hal);
1159 
1160 	/* Notify targets of the end of bus reset processing */
1161 	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1162 		return (DDI_FAILURE);
1163 	}
1164 
1165 	localinfo.bus_generation = hal->generation_count;
1166 	localinfo.local_nodeID = hal->node_id;
1167 
1168 	s1394_unlock_tree(hal);
1169 	s1394_target_bus_reset_notifies(hal, &localinfo);
1170 	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1171 		return (DDI_FAILURE);
1172 	}
1173 
1174 	/* Set HAL state to normal */
1175 	if (hal->disable_requests_bit == 0)
1176 		hal->hal_state = S1394_HAL_NORMAL;
1177 	else
1178 		hal->hal_state = S1394_HAL_DREQ;
1179 
1180 	s1394_unlock_tree(hal);
1181 
1182 	/* Flush the pending Q */
1183 	s1394_resend_pending_cmds(hal);
1184 
1185 	if (s1394_process_topology_tree(hal, &wait_for_cbs, &wait_gen)) {
1186 		ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
1187 		return (DDI_FAILURE);
1188 	}
1189 
1190 	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1191 		return (DDI_FAILURE);
1192 	}
1193 
1194 	s1394_print_node_info(hal);
1195 
1196 	s1394_unlock_tree(hal);
1197 
1198 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
1199 
1200 	ret = DDI_SUCCESS;
1201 
1202 	/*
1203 	 * If we started any reads, wait for completion callbacks
1204 	 */
1205 	if (wait_for_cbs != 0) {
1206 		ret = s1394_wait_for_cfgrom_callbacks(hal, wait_gen,
1207 		    s1394_br_thread_handle_cmd_phase2);
1208 	}
1209 
1210 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
1211 
1212 	return (ret);
1213 }
1214 
1215 /*
1216  * s1394_br_thread_handle_cmd_phase2()
1217  *    Process the cmd completion for phase 2 config rom reads. If all the
1218  *    needed quads are read, validates the config rom; if config rom is
1219  *    invalid (crc failures), frees the config rom, else marks the config rom
1220  *    valid and calls s1394_update_devinfo_tree() to parse the config rom.
1221  *    If need to get more quadlets, attempts to kick off the read and returns
1222  *    S1394_HCMD_NODE_EXPECT_MORE if successfully started the read. If a bus
1223  *    reset is seen while in this routine, returns S1394_HCMD_LOCK_FAILED. If
 *    done with the node (with or without crc errors), returns
1225  *    S1394_HCMD_NODE_DONE, else returns S1394_HCMD_NODE_EXPECT_MORE (to
1226  *    indicate not done with the node yet).
1227  */
1228 static hcmd_ret_t
1229 s1394_br_thread_handle_cmd_phase2(s1394_hal_t *hal, cmd1394_cmd_t *cmd)
1230 {
1231 	s1394_node_t *node;
1232 	uint32_t node_num, quadlet, data;
1233 	int update_devinfo, locked, freecmd, done;
1234 	hcmd_ret_t cmdret;
1235 	uchar_t readdelay;
1236 	s1394_status_t status;
1237 
1238 	/*
1239 	 * we end up here if this is a brand new node or if it is a known node
1240 	 * but the config ROM changed (and triggered a re-read).
1241 	 */
1242 	s1394_get_quad_info(cmd, &node_num, &quadlet, &data);
1243 	ASSERT(quadlet == IEEE1394_BIB_QUAD_SZ || quadlet <
1244 	    IEEE1394_CONFIG_ROM_QUAD_SZ);
1245 
1246 	locked = freecmd = done = 1;
1247 	cmdret = S1394_HCMD_NODE_EXPECT_MORE;
1248 
1249 	update_devinfo = 0;
1250 
1251 	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1252 		locked = 0;
1253 		goto bail;
1254 	}
1255 
1256 	node = &hal->topology_tree[node_num];
1257 
1258 	if (cmd->cmd_result == CMD1394_CMDSUCCESS) {
1259 
1260 		ASSERT(CFGROM_BIB_READ(node) == B_TRUE);
1261 
1262 		node->cfgrom[quadlet] = data;
1263 
1264 		if (s1394_calc_next_quad(hal, node, quadlet, &quadlet) != 0) {
1265 			/*
1266 			 * Done with this node. Mark config rom valid and
1267 			 * update the devinfo tree for this node.
1268 			 */
1269 			node->cfgrom_valid_size = quadlet + 1;
1270 			if (s1394_valid_cfgrom(hal, node) == B_TRUE) {
1271 				SET_CFGROM_ALL_READ(node);
1272 				update_devinfo++;
1273 			} else {
1274 				s1394_free_cfgrom(hal, node,
1275 				    S1394_FREE_CFGROM_BOTH);
1276 			}
1277 		} else {
1278 			CFGROM_GET_READ_DELAY(node, readdelay);
1279 			SETUP_QUAD_READ(node, 1, quadlet, 1);
1280 			s1394_unlock_tree(hal);
1281 			CFGROM_READ_PAUSE(readdelay);
1282 			if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1283 				locked = 0;
1284 			} else if (s1394_read_config_quadlet(hal, cmd,
1285 			    &status) != DDI_SUCCESS) {
1286 				/* give up on this guy */
1287 				if ((status & S1394_CMD_INFLIGHT) != 0) {
1288 					freecmd = 0;
1289 				}
1290 				if ((status & S1394_LOCK_FAILED) != 0) {
1291 					locked = 0;
1292 				} else {
1293 					node->cfgrom_valid_size = quadlet;
1294 					if (s1394_valid_cfgrom(hal, node) ==
1295 					    B_TRUE) {
1296 						SET_CFGROM_ALL_READ(node);
1297 						update_devinfo++;
1298 					} else {
1299 						s1394_free_cfgrom(hal, node,
1300 						    S1394_FREE_CFGROM_BOTH);
1301 					}
1302 				}
1303 			} else {
1304 				/* successfully started next read */
1305 				done = 0;
1306 				freecmd = 0;
1307 			}
1308 		}
1309 	} else {
1310 		node->cfgrom_read_fails++;
1311 		BUMP_CFGROM_READ_DELAY(node);
1312 
1313 		/* retry if not too many failures */
1314 		if (node->cfgrom_read_fails < s1394_cfgrom_read_retry_cnt) {
1315 			CFGROM_GET_READ_DELAY(node, readdelay);
1316 			s1394_unlock_tree(hal);
1317 			SETUP_QUAD_READ(node, 0, quadlet, 1);
1318 			CFGROM_READ_PAUSE(readdelay);
1319 			if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1320 				locked = 0;
1321 			} else if (s1394_read_config_quadlet(hal, cmd,
1322 			    &status) != DDI_SUCCESS) {
1323 				if ((status & S1394_CMD_INFLIGHT) != 0) {
1324 					freecmd = 0;
1325 				}
1326 				if ((status & S1394_LOCK_FAILED) != 0) {
1327 					locked = 0;
1328 				} else {
1329 					/* stop further reads */
1330 					node->cfgrom_valid_size = quadlet + 1;
1331 					if (s1394_valid_cfgrom(hal, node) ==
1332 					    B_TRUE) {
1333 						SET_CFGROM_ALL_READ(node);
1334 						update_devinfo++;
1335 					} else {
1336 						s1394_free_cfgrom(hal, node,
1337 						    S1394_FREE_CFGROM_BOTH);
1338 					}
1339 				}
1340 			} else {
1341 				/* successfully started next read */
1342 				done = 0;
1343 				freecmd = 0;
1344 			}
1345 		} else {
1346 			node->cfgrom_valid_size = quadlet + 1;
1347 			if (s1394_valid_cfgrom(hal, node) == B_TRUE) {
1348 				SET_CFGROM_ALL_READ(node);
1349 				update_devinfo++;
1350 			} else {
1351 				s1394_free_cfgrom(hal, node,
1352 				    S1394_FREE_CFGROM_BOTH);
1353 			}
1354 		}
1355 	}
1356 bail:
1357 	if (freecmd) {
1358 		(void) s1394_free_cmd(hal, &cmd);
1359 	}
1360 
1361 	if (done) {
1362 		cmdret = S1394_HCMD_NODE_DONE;
1363 	}
1364 
1365 	if (update_devinfo) {
1366 		ASSERT(locked);
1367 		/*
1368 		 * s1394_update_devinfo_tree() drops and reacquires the
1369 		 * topology_tree_mutex. If tree lock fails, it returns
		 * a DDI_FAILURE. Set locked to 0 in this case so that
		 * we will return S1394_HCMD_LOCK_FAILED below.
1372 		 */
1373 		if (s1394_update_devinfo_tree(hal, node) != DDI_SUCCESS) {
1374 			ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
1375 			locked = 0;
1376 		}
1377 	}
1378 
1379 	/* if we are bailing out because locking failed, locked == 0 */
1380 	if (locked == 0)
1381 		cmdret = S1394_HCMD_LOCK_FAILED;
1382 	else
1383 		s1394_unlock_tree(hal);
1384 
1385 	return (cmdret);
1386 }
1387 
1388 /*
1389  * s1394_read_config_quadlet()
 *    Starts the read of a config quadlet (cmd_addr is deduced from the node's
 *    cfgrom_quad_to_read).  Returns DDI_SUCCESS if the read was started with
 *    no errors, else DDI_FAILURE is returned, with status indicating the
 *    reason for the failure(s).
1393  */
1394 static int
1395 s1394_read_config_quadlet(s1394_hal_t *hal, cmd1394_cmd_t *cmd,
1396     s1394_status_t *status)
1397 {
1398 	s1394_node_t *node;
1399 	int ret, err, node_num, quadlet;
1400 
1401 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
1402 	node_num = IEEE1394_ADDR_PHY_ID(cmd->cmd_addr);
1403 	node = &hal->topology_tree[node_num];
1404 	quadlet = node->cfgrom_quad_to_read;
1405 
1406 	/* Calculate the 64-bit address */
1407 	QUAD_TO_CFGROM_ADDR(IEEE1394_LOCAL_BUS, node_num, quadlet,
1408 	    cmd->cmd_addr);
1409 
1410 	*status = S1394_NOSTATUS;
1411 
1412 	ret = s1394_setup_asynch_command(hal, NULL, cmd, S1394_CMD_READ, &err);
1413 
1414 	if (ret != DDI_SUCCESS) {
1415 		*status |= S1394_UNKNOWN;
1416 		ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
1417 		return (DDI_FAILURE);
1418 	}
1419 
1420 	s1394_unlock_tree(hal);
1421 	ret = DDI_SUCCESS;
1422 	/* Send the command out */
1423 	if (s1394_xfer_asynch_command(hal, cmd, &err) == DDI_SUCCESS) {
1424 		/* Callers can expect a callback now */
1425 		*status |= S1394_CMD_INFLIGHT;
1426 	} else {
1427 
1428 		s1394_cmd_priv_t *s_priv;
1429 
1430 		/* Remove from queue */
1431 		s1394_remove_q_asynch_cmd(hal, cmd);
1432 		s_priv = S1394_GET_CMD_PRIV(cmd);
1433 
1434 		s_priv->cmd_in_use = B_FALSE;
1435 
1436 		*status |= S1394_XFER_FAILED;
1437 		ret = DDI_FAILURE;
1438 	}
1439 
1440 	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
1441 		*status |= S1394_LOCK_FAILED;
1442 		ret = DDI_FAILURE;
1443 	}
1444 
1445 	return (ret);
1446 }
1447 
1448 /*
1449  * s1394_cfgrom_read_callback()
 *    callback routine for config rom reads. Frees the command if it failed
 *    due to a bus reset; otherwise it appends the command to the completion
 *    queue and signals the completion queue cv.
1453  */
1454 static void
1455 s1394_cfgrom_read_callback(cmd1394_cmd_t *cmd)
1456 {
1457 	cmd1394_cmd_t *tcmd;
1458 	s1394_cmd_priv_t *s_priv;
1459 	s1394_hal_t *hal;
1460 
1461 #if defined(DEBUG)
1462 	uint32_t node_num, quadlet, data;
1463 #endif
1464 
1465 	/* Get the Services Layer private area */
1466 	s_priv = S1394_GET_CMD_PRIV(cmd);
1467 
1468 	hal = (s1394_hal_t *)s_priv->sent_on_hal;
1469 
1470 #if defined(DEBUG)
1471 
1472 	s1394_get_quad_info(cmd, &node_num, &quadlet, &data);
1473 
1474 #endif
1475 
1476 	if (cmd->cmd_result == CMD1394_EBUSRESET) {
1477 		(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
1478 	} else {
1479 		mutex_enter(&hal->br_cmplq_mutex);
1480 
1481 		/* Put the command on completion queue */
1482 		s_priv->cmd_priv_next = NULL;
1483 		if ((tcmd = hal->br_cmplq_tail) != NULL) {
1484 			s_priv = S1394_GET_CMD_PRIV(tcmd);
1485 
1486 			s_priv->cmd_priv_next = cmd;
1487 		}
1488 
1489 		hal->br_cmplq_tail = cmd;
1490 
1491 		if (hal->br_cmplq_head == NULL)
1492 			hal->br_cmplq_head = cmd;
1493 
1494 		cv_signal(&hal->br_cmplq_cv);
1495 		mutex_exit(&hal->br_cmplq_mutex);
1496 	}
1497 }
1498 
1499 /*
1500  * s1394_cfgrom_parse_unit_dir()
1501  *    Parses the unit directory passed in and returns reg[2...5] of reg
1502  *    property (see 1275 binding for reg property defintion). Currently,
1503  *    returns 0 for all the values since none of the existing devices implement
1504  *    this and future devices, per P1212r, need a binding change.
1505  */
1506 /* ARGSUSED */
1507 void
1508 s1394_cfgrom_parse_unit_dir(uint32_t *unit_dir, uint32_t *addr_hi,
1509     uint32_t *addr_lo, uint32_t *size_hi, uint32_t *size_lo)
1510 {
1511 	*addr_hi = *addr_lo = *size_hi = *size_lo = 0;
1512 }
1513 
1514 /*
1515  * s1394_get_quad_info()
1516  *    Helper routine that picks apart the various fields of a 1394 address
1517  */
1518 static void
1519 s1394_get_quad_info(cmd1394_cmd_t *cmd, uint32_t *node_num, uint32_t *quadlet,
1520     uint32_t *data)
1521 {
1522 	uint64_t addr;
1523 
1524 	addr = cmd->cmd_addr;
1525 	*node_num = IEEE1394_ADDR_PHY_ID(addr);
1526 	*quadlet = ((addr & IEEE1394_ADDR_OFFSET_MASK) -
1527 	    IEEE1394_CONFIG_ROM_ADDR);
1528 	*quadlet = (*quadlet >> 2);
1529 	*data = T1394_DATA32(cmd->cmd_u.q.quadlet_data);
1530 }
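
/*
 * For illustration (the inverse of the QUAD_TO_CFGROM_ADDR() example near
 * the top of this file, with the same assumed constants): an address of
 * 0xFFC2FFFFF000040C decodes to node_num 2 and quadlet 3, since
 * (addr & IEEE1394_ADDR_OFFSET_MASK) - IEEE1394_CONFIG_ROM_ADDR == 0xC
 * and 0xC >> 2 == 3.
 */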
1531 
1532 /*
1533  * s1394_match_GUID()
1534  *    attempts to match nnode (which is in the current topology tree) with
 *    a node in the old topology tree by comparing GUIDs. If a match is found,
 *    the old_node field of the current node and the cur_node field of the old
 *    node are set to point to each other. Also, this routine makes both nodes
1538  *    point at the same config rom.  If unable to relock the tree, returns
1539  *    DDI_FAILURE, else returns DDI_SUCCESS.
1540  */
1541 static int
1542 s1394_match_GUID(s1394_hal_t *hal, s1394_node_t *nnode)
1543 {
1544 	int old_node;
1545 	int gen_changed;
1546 	uint32_t old_a, old_b;
1547 	uint32_t new_a, new_b;
1548 	s1394_node_t *onode;
1549 	s1394_target_t *t;
1550 	int	ret = DDI_SUCCESS;
1551 
1552 	ASSERT(nnode->cfgrom != NULL);
1553 	ASSERT(CFGROM_BIB_READ(nnode));
1554 
1555 	new_a = nnode->node_guid_hi;
1556 	new_b = nnode->node_guid_lo;
1557 
1558 	for (old_node = 0; old_node < hal->old_number_of_nodes; old_node++) {
1559 
1560 		onode = &hal->old_tree[old_node];
1561 		if (onode->cfgrom == NULL || CFGROM_BIB_READ(onode) == B_FALSE)
1562 			continue;
1563 
1564 		old_a = onode->node_guid_hi;
1565 		old_b = onode->node_guid_lo;
1566 
1567 		if ((old_a == new_a) && (old_b == new_b)) {
1568 
1569 			if (NODE_MATCHED(onode) == B_TRUE) {
1570 				cmn_err(CE_NOTE, "!Duplicate GUIDs: %08x%08x",
1571 				    old_a, old_b);
1572 				/* offline the new node that last matched */
1573 				ret = s1394_offline_node(hal, onode->cur_node);
1574 				/* and make the current new node invalid */
1575 				ASSERT(CFGROM_NEW_ALLOC(nnode) == B_TRUE);
1576 				s1394_free_cfgrom(hal, nnode,
1577 				    S1394_FREE_CFGROM_NEW);
1578 				break;
1579 			}
1580 
1581 			/*
1582 			 * If there is indeed a cfgrom gen change,
1583 			 * CFGROM_GEN_CHANGED() will be set iff we are matching
1584 			 * tree nodes. Otherwise, CONFIG_ROM_GEN(old) !=
1585 			 * CONFIG_ROM_GEN(new).
1586 			 */
1587 			if (CFGROM_GEN_CHANGED(nnode) == B_TRUE ||
1588 			    (CONFIG_ROM_GEN(onode->cfgrom) !=
1589 			    CONFIG_ROM_GEN(nnode->cfgrom))) {
1590 				gen_changed = 1;
1591 			} else {
1592 				gen_changed = 0;
1593 			}
1594 
1595 			onode->cur_node = nnode;
1596 			nnode->old_node = onode;
1597 			nnode->node_state = onode->node_state;
1598 			SET_NODE_VISITED(onode);
1599 			SET_NODE_MATCHED(onode);
1600 			SET_NODE_MATCHED(nnode);
1601 			/*
1602 			 * If generations changed, need to offline any targets
1603 			 * hanging off the old node, prior to freeing up old
1604 			 * cfgrom. If the generations didn't change, we can
1605 			 * free up the new config rom and copy all info from
1606 			 * the old node (this helps in picking up further
1607 			 * reads from where the last generation left off).
1608 			 */
1609 			if (gen_changed == 1) {
1610 				if (s1394_offline_node(hal, onode)) {
1611 					ret = DDI_FAILURE;
1612 					break;
1613 				}
1614 				s1394_free_cfgrom(hal, onode,
1615 				    S1394_FREE_CFGROM_OLD);
1616 				CLEAR_CFGROM_PARSED(nnode);
1617 				CLEAR_CFGROM_NEW_ALLOC(nnode);
1618 				CLEAR_CFGROM_NEW_ALLOC(onode);
1619 				onode->cfgrom = nnode->cfgrom;
1620 				/* done */
1621 				break;
1622 			}
1623 
1624 			/*
1625 			 * Free up cfgrom memory in the new_node and
1626 			 * point it at the same config rom as the old one.
1627 			 */
1628 			if (onode->cfgrom != nnode->cfgrom) {
1629 				ASSERT(CFGROM_NEW_ALLOC(nnode) == B_TRUE);
1630 				s1394_free_cfgrom(hal, nnode,
1631 				    S1394_FREE_CFGROM_NEW);
1632 			}
1633 			nnode->cfgrom = onode->cfgrom;
1634 			nnode->cfgrom_state = onode->cfgrom_state;
1635 			nnode->cfgrom_valid_size = onode->cfgrom_valid_size;
1636 			nnode->cfgrom_size = onode->cfgrom_size;
1637 			nnode->cfgrom_quad_to_read = onode->cfgrom_quad_to_read;
1638 			bcopy(onode->dir_stack, nnode->dir_stack,
1639 			    offsetof(s1394_node_t, cfgrom_quad_to_read) -
1640 			    offsetof(s1394_node_t, dir_stack));
1641 			CLEAR_CFGROM_NEW_ALLOC(nnode);
1642 			CLEAR_CFGROM_NEW_ALLOC(onode);
1643 
1644 			if (CFGROM_PARSED(nnode) == B_TRUE) {
1645 				rw_enter(&hal->target_list_rwlock, RW_WRITER);
1646 				/* Update the target list */
1647 				if ((t = onode->target_list) != NULL) {
1648 					nnode->target_list = t;
1649 					while (t != NULL) {
1650 						t->on_node = nnode;
1651 						t = t->target_sibling;
1652 					}
1653 				}
1654 				rw_exit(&hal->target_list_rwlock);
1655 			}
1656 			break;
1657 		}
1658 	}
1659 
1660 	return (ret);
1661 }
1662 
1663 /*
1664  * s1394_match_all_GUIDs()
 *    attempts to match each node in the current topology tree with a node
 *    in the old topology tree.  If unable to relock the tree, returns
1667  *    DDI_FAILURE, else returns DDI_SUCCESS.
1668  */
1669 static int
1670 s1394_match_all_GUIDs(s1394_hal_t *hal)
1671 {
1672 	int node;
1673 	int ret = DDI_SUCCESS;
1674 	s1394_node_t *nnode;
1675 
1676 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
1677 
1678 	for (node = 0; node < hal->number_of_nodes; node++) {
1679 		nnode = &hal->topology_tree[node];
1680 		if (LINK_ACTIVE(nnode) == B_FALSE || CFGROM_BIB_READ(nnode) ==
1681 		    B_FALSE)
1682 			continue;
1683 		if (NODE_MATCHED(nnode)) {
1684 			/*
1685 			 * Skip if node matched. If config rom generations
1686 			 * changed, we want to call s1394_match_GUID() even
1687 			 * if the nodes matched.
1688 			 */
1689 			int gen_changed;
1690 			s1394_node_t *onode = nnode->old_node;
1691 
1692 			gen_changed = (onode && onode->cfgrom &&
1693 			    CONFIG_ROM_GEN(onode->cfgrom) != CONFIG_ROM_GEN(
1694 			    nnode->cfgrom)) ? 1 : 0;
1695 
1696 			if (CFGROM_GEN_CHANGED(nnode) == 0 && gen_changed == 0)
1697 				continue;
1698 		}
1699 
1700 		if (s1394_match_GUID(hal, nnode) == DDI_FAILURE) {
1701 			ret = DDI_FAILURE;
1702 		}
1703 	}
1704 
1705 	return (ret);
1706 }
1707 
1708 /*
1709  * s1394_valid_cfgrom()
1710  *    Performs crc check on the config rom. Returns B_TRUE if config rom has
1711  *    good CRC else returns B_FALSE.
1712  */
1713 /* ARGSUSED */
1714 boolean_t
1715 s1394_valid_cfgrom(s1394_hal_t *hal, s1394_node_t *node)
1716 {
1717 	uint32_t crc_len, crc_value, CRC, CRC_old, quad0;
1718 
1719 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
1720 	ASSERT(node->cfgrom);
1721 
1722 	if (s1394_enable_crc_validation == 0) {
1723 		return (B_TRUE);
1724 	}
1725 
1726 	quad0 = node->cfgrom[0];
1727 	crc_len = (quad0 >> IEEE1394_CFG_ROM_CRC_LEN_SHIFT) &
1728 	    IEEE1394_CFG_ROM_CRC_LEN_MASK;
1729 	crc_value = quad0 & IEEE1394_CFG_ROM_CRC_VALUE_MASK;
1730 
1731 	if (node->cfgrom_valid_size < crc_len + 1) {
1732 		return (B_FALSE);
1733 	}
1734 
1735 	CRC = s1394_CRC16(&node->cfgrom[1], crc_len);
1736 
1737 	if (CRC != crc_value) {
1738 		CRC_old = s1394_CRC16_old(&node->cfgrom[1], crc_len);
1739 		if (CRC_old == crc_value) {
1740 			return (B_TRUE);
1741 		}
1742 
1743 		cmn_err(CE_NOTE,
1744 		    "!Bad CRC in config rom (node's GUID %08x%08x)",
1745 		    node->node_guid_hi, node->node_guid_lo);
1746 
1747 		return (B_FALSE);
1748 	}
1749 
1750 	return (B_TRUE);
1751 }
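
/*
 * For reference, a sketch of the IEEE 1212 CRC-16 that s1394_CRC16() is
 * expected to compute over crc_len quadlets starting at cfgrom[1] (shown
 * here for illustration only; the real implementation lives elsewhere in
 * the services layer):
 *
 *	crc = 0;
 *	for (i = 0; i < crc_len; i++) {
 *		data = q[i];
 *		for (shift = 28; shift >= 0; shift -= 4) {
 *			sum = ((crc >> 12) ^ (data >> shift)) & 0xf;
 *			crc = (crc << 4) ^ (sum << 12) ^ (sum << 5) ^ sum;
 *		}
 *		crc &= 0xffff;
 *	}
 */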
1752 
1753 /*
1754  * s1394_valid_dir()
1755  *    Performs crc check on a directory.  Returns B_TRUE if dir has good CRC
1756  *    else returns B_FALSE.
1757  */
1758 /*ARGSUSED*/
1759 boolean_t
1760 s1394_valid_dir(s1394_hal_t *hal, s1394_node_t *node,
1761     uint32_t key, uint32_t *dir)
1762 {
1763 	uint32_t dir_len, crc_value, CRC, CRC_old, quad0;
1764 
1765 	/*
1766 	 * Ideally, we would like to do crc validations for the entire cfgrom
1767 	 * as well as the individual directories. However, we have seen devices
1768 	 * that have valid directories but busted cfgrom crc and devices that
1769 	 * have bad crcs in directories as well as for the entire cfgrom. This
1770 	 * is sad, but unfortunately, real world!
1771 	 */
1772 	if (s1394_enable_crc_validation == 0) {
1773 		return (B_TRUE);
1774 	}
1775 
1776 	quad0 = dir[0];
1777 
1778 	dir_len = IEEE1212_DIR_LEN(quad0);
1779 	crc_value = IEEE1212_DIR_CRC(quad0);
1780 
1781 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
1782 
1783 	CRC = s1394_CRC16(&dir[1], dir_len);
1784 
1785 	if (CRC != crc_value) {
1786 		CRC_old = s1394_CRC16_old(&dir[1], dir_len);
1787 		if (CRC_old == crc_value) {
1788 			return (B_TRUE);
1789 		}
1790 
1791 		return (B_FALSE);
1792 	}
1793 
1794 	return (B_TRUE);
1795 }
1796 
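/*
 * For reference, a minimal sketch of the serial CRC-16 defined by IEEE 1212
 * (polynomial x^16 + x^12 + x^5 + 1, computed four bits at a time over each
 * quadlet).  This is illustrative only and not part of the driver; the
 * checks above rely on s1394_CRC16(), falling back to s1394_CRC16_old()
 * for ROMs that match the older computation.
 *
 *	uint16_t
 *	cfgrom_crc16_sketch(uint32_t *quads, int len)
 *	{
 *		uint16_t crc = 0, sum;
 *		int i, shift;
 *
 *		for (i = 0; i < len; i++) {
 *			for (shift = 28; shift >= 0; shift -= 4) {
 *				sum = ((crc >> 12) ^
 *				    (quads[i] >> shift)) & 0xF;
 *				crc = (crc << 4) ^ (sum << 12) ^
 *				    (sum << 5) ^ sum;
 *			}
 *		}
 *		return (crc);
 *	}
 */
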
1797 /*
1798  * s1394_become_bus_mgr()
1799  *    is a callback from a timeout() set up by the main br_thread.  After
1800  *    a bus reset, depending on the Bus Manager's incumbency and the state
1801  *    of its abdicate bit, a timer of a certain length is set.  After this
1802  *    time expires, the local host may attempt to become the Bus Manager.
1803  *    This is done by sending a request to the current IRM on the bus.  The
1804  *    IRM holds the BUS_MANAGER_ID register.  Depending on whether or not
1805  *    the local host is already the IRM, we will send a request onto the
1806  *    1394 bus or call into the HAL.
1807  */
1808 static void
1809 s1394_become_bus_mgr(void *arg)
1810 {
1811 	s1394_hal_t	 *hal;
1812 	s1394_cmd_priv_t *s_priv;
1813 	cmd1394_cmd_t	 *cmd;
1814 	uint64_t	 Bus_Mgr_ID_addr;
1815 	uint32_t	 hal_node_num;
1816 	uint32_t	 old_value;
1817 	uint32_t	 generation;
1818 	uint_t		 curr_bus_mgr;
1819 	uint_t		 bm_node;
1820 	uint_t		 IRM_node;
1821 	int		 err;
1822 	int		 ret;
1823 
1824 	hal = (s1394_hal_t *)arg;
1825 
1826 	/* Lock the topology tree */
1827 	mutex_enter(&hal->topology_tree_mutex);
1828 
1829 	hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
1830 	generation   = hal->generation_count;
1831 	IRM_node = hal->IRM_node;
1832 
1833 	mutex_enter(&hal->bus_mgr_node_mutex);
1834 	bm_node = hal->bus_mgr_node;
1835 	mutex_exit(&hal->bus_mgr_node_mutex);
1836 
1837 	/* Unlock the topology tree */
1838 	mutex_exit(&hal->topology_tree_mutex);
1839 
1840 	/* Make sure a Bus Manager has not already been determined */
1841 	if (bm_node != -1) {
1842 		return;
1843 	}
1844 
1845 	/* Send compare-swap to BUS_MANAGER_ID */
1846 	/* register on the Isoch Rsrc Mgr */
1847 	if (IRM_node == hal_node_num) {
1848 		/* Local */
1849 		ret = HAL_CALL(hal).csr_cswap32(hal->halinfo.hal_private,
1850 		    generation, (IEEE1394_SCSR_BUSMGR_ID &
1851 		    IEEE1394_CSR_OFFSET_MASK), S1394_INVALID_NODE_NUM,
1852 		    hal_node_num, &old_value);
1853 		if (ret != DDI_SUCCESS) {
1854 			return;
1855 		}
1856 		curr_bus_mgr = IEEE1394_NODE_NUM(old_value);
1857 
1858 		mutex_enter(&hal->bus_mgr_node_mutex);
1859 		if ((curr_bus_mgr == S1394_INVALID_NODE_NUM) ||
1860 		    (curr_bus_mgr == hal_node_num)) {
1861 			hal->bus_mgr_node = hal_node_num;
1862 			hal->incumbent_bus_mgr = B_TRUE;
1863 		} else {
1864 			hal->bus_mgr_node = curr_bus_mgr;
1865 			hal->incumbent_bus_mgr = B_FALSE;
1866 		}
1867 		cv_signal(&hal->bus_mgr_node_cv);
1868 		mutex_exit(&hal->bus_mgr_node_mutex);
1869 
1870 	} else {
1871 		/* Remote */
1872 		if (s1394_alloc_cmd(hal, T1394_ALLOC_CMD_NOSLEEP, &cmd) !=
1873 		    DDI_SUCCESS) {
1874 			return;
1875 		}
1876 
1877 		cmd->cmd_options	   = (CMD1394_CANCEL_ON_BUS_RESET |
1878 		    CMD1394_OVERRIDE_ADDR);
1879 		cmd->cmd_type		   = CMD1394_ASYNCH_LOCK_32;
1880 		cmd->completion_callback   = s1394_become_bus_mgr_callback;
1881 		Bus_Mgr_ID_addr		   = (IEEE1394_ADDR_BUS_ID_MASK |
1882 		    IEEE1394_SCSR_BUSMGR_ID) |
1883 		    (((uint64_t)hal->IRM_node) << IEEE1394_ADDR_PHY_ID_SHIFT);
1884 		cmd->cmd_addr		   = Bus_Mgr_ID_addr;
1885 		cmd->bus_generation	   = generation;
1886 		cmd->cmd_u.l32.arg_value   = T1394_DATA32(
1887 		    S1394_INVALID_NODE_NUM);
1888 		cmd->cmd_u.l32.data_value  = T1394_DATA32(hal_node_num);
1889 		cmd->cmd_u.l32.num_retries = 0;
1890 		cmd->cmd_u.l32.lock_type   = CMD1394_LOCK_COMPARE_SWAP;
1891 
1892 		/* Get the Services Layer private area */
1893 		s_priv = S1394_GET_CMD_PRIV(cmd);
1894 
1895 		/* Lock the topology tree */
1896 		mutex_enter(&hal->topology_tree_mutex);
1897 
1898 		ret = s1394_setup_asynch_command(hal, NULL, cmd,
1899 		    S1394_CMD_LOCK, &err);
1900 
1901 		/* Unlock the topology tree */
1902 		mutex_exit(&hal->topology_tree_mutex);
1903 
1904 		/* Command has now been put onto the queue! */
1905 		if (ret != DDI_SUCCESS) {
1906 			/* Need to free the command */
1907 			(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
1908 			return;
1909 		}
1910 
1911 		/* Send the command out */
1912 		ret = s1394_xfer_asynch_command(hal, cmd, &err);
1913 
1914 		if (ret != DDI_SUCCESS) {
1915 			/* Remove cmd outstanding request Q */
1916 			s1394_remove_q_asynch_cmd(hal, cmd);
1917 
1918 			s_priv->cmd_in_use = B_FALSE;
1919 
1920 			mutex_enter(&hal->bus_mgr_node_mutex);
1921 
1922 			/* Don't know who the bus_mgr is */
1923 			hal->bus_mgr_node = S1394_INVALID_NODE_NUM;
1924 			hal->incumbent_bus_mgr = B_FALSE;
1925 
1926 			cv_signal(&hal->bus_mgr_node_cv);
1927 			mutex_exit(&hal->bus_mgr_node_mutex);
1928 
1929 			/* Need to free the command */
1930 			(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
1931 		}
1932 	}
1933 }
1934 
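/*
 * Illustration of the BUS_MANAGER_ID arbitration above (node numbers are
 * made up): with the local host at node 2 and a remote IRM at node 5, the
 * remote path issues a compare-swap lock to node 5's BUS_MANAGER_ID
 * register with arg_value = S1394_INVALID_NODE_NUM and data_value = 2.
 * If the register was still unclaimed, old_value comes back as
 * S1394_INVALID_NODE_NUM, the swap takes effect, and the callback records
 * node 2 as Bus Manager (incumbent_bus_mgr = B_TRUE).  If some other node,
 * say node 3, won the race, old_value comes back as 3 and that node is
 * recorded in bus_mgr_node instead.
 */
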
1935 /*
1936  * s1394_become_bus_mgr_callback()
1937  *    is the callback used by s1394_become_bus_mgr() when it is necessary
1938  *    to send the Bus Manager request to a remote IRM.  After the completion
1939  *    of the compare-swap request, this routine looks at the "old_value"
1940  *    in the request to determine whether or not it has become the Bus
1941  *    Manager for the current generation.  It sets the bus_mgr_node and
1942  *    incumbent_bus_mgr fields to their appropriate values.
1943  */
1944 static void
1945 s1394_become_bus_mgr_callback(cmd1394_cmd_t *cmd)
1946 {
1947 	s1394_cmd_priv_t *s_priv;
1948 	s1394_hal_t *hal;
1949 	uint32_t hal_node_num;
1950 	uint32_t temp;
1951 	uint_t curr_bus_mgr;
1952 
1953 	/* Get the Services Layer private area */
1954 	s_priv = S1394_GET_CMD_PRIV(cmd);
1955 
1956 	hal = (s1394_hal_t *)s_priv->sent_on_hal;
1957 
1958 	/* Lock the topology tree */
1959 	mutex_enter(&hal->topology_tree_mutex);
1960 
1961 	hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
1962 
1963 	/* Was the command successful? */
1964 	if (cmd->cmd_result == CMD1394_CMDSUCCESS) {
1965 		temp = T1394_DATA32(cmd->cmd_u.l32.old_value);
1966 		curr_bus_mgr = IEEE1394_NODE_NUM(temp);
1967 		mutex_enter(&hal->bus_mgr_node_mutex);
1968 		if ((curr_bus_mgr == S1394_INVALID_NODE_NUM) ||
1969 		    (curr_bus_mgr == hal_node_num)) {
1970 
1971 			hal->bus_mgr_node = hal_node_num;
1972 			hal->incumbent_bus_mgr = B_TRUE;
1973 
1974 		} else {
1975 			hal->bus_mgr_node = curr_bus_mgr;
1976 			hal->incumbent_bus_mgr = B_FALSE;
1977 		}
1978 		cv_signal(&hal->bus_mgr_node_cv);
1979 		mutex_exit(&hal->bus_mgr_node_mutex);
1980 
1981 	} else {
1982 		mutex_enter(&hal->bus_mgr_node_mutex);
1983 
1984 		/* Don't know who the bus_mgr is */
1985 		hal->bus_mgr_node = S1394_INVALID_NODE_NUM;
1986 		hal->incumbent_bus_mgr = B_FALSE;
1987 
1988 		cv_signal(&hal->bus_mgr_node_cv);
1989 		mutex_exit(&hal->bus_mgr_node_mutex);
1990 	}
1991 
1992 	/* Need to free the command */
1993 	(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
1994 
1995 	/* Unlock the topology tree */
1996 	mutex_exit(&hal->topology_tree_mutex);
1997 }
1998 
1999 /*
2000  * s1394_bus_mgr_processing()
2001  *    is called following "phase1" completion of reading Bus_Info_Blocks.
2002  *    Its purpose is to determine whether the local node is capable of
2003  *    becoming the Bus Manager (has the IRMC bit set) and if so to call
2004  *    the s1394_do_bus_mgr_processing() routine.
2005  *    NOTE: we overload DDI_FAILURE return value to mean jump back to
2006  *    the start of bus reset processing.
2007  */
2008 static int
2009 s1394_bus_mgr_processing(s1394_hal_t *hal)
2010 {
2011 	int ret;
2012 	int IRM_node_num;
2013 
2014 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
2015 
2016 	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
2017 		return (DDI_FAILURE);
2018 	}
2019 	IRM_node_num = hal->IRM_node;
2020 	s1394_unlock_tree(hal);
2021 
2022 	ret = DDI_SUCCESS;
2023 
2024 	/* If we are IRM capable, then do bus_mgr stuff... */
2025 	if (hal->halinfo.bus_capabilities & IEEE1394_BIB_IRMC_MASK) {
2026 		/* If there is an IRM, then do bus_mgr stuff */
2027 		if (IRM_node_num != -1) {
2028 			if (s1394_do_bus_mgr_processing(hal))
2029 				ret = DDI_FAILURE;
2030 		}
2031 	}
2032 
2033 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
2034 
2035 	return (ret);
2036 }
2037 
2038 /*
2039  * s1394_do_bus_mgr_processing()
2040  *    is used to perform those operations expected of the Bus Manager.
2041  *    After being called, s1394_do_bus_mgr_processing() looks at the value
2042  *    in bus_mgr_node and waits if it is -1 (Bus Manager has not been
2043  *    chosen yet).  Then, if there is more than one node on the 1394 bus,
2044  *    and we are either the Bus Manager or (if there is no Bus Manager)
2045  *    the IRM, it optimizes the gap_count and/or sets the cycle master's
2046  *    root holdoff bit (to ensure that the cycle master is/stays root).
2047  *
2048  *    NOTE: we overload DDI_FAILURE return value to mean jump back to
2049  *    the start of bus reset processing.
2050  */
2051 static int
2052 s1394_do_bus_mgr_processing(s1394_hal_t *hal)
2053 {
2054 	int	ret;
2055 	int	IRM_flags, hal_bus_mgr_node;
2056 	int	IRM_node_num;
2057 	uint_t	hal_node_num, number_of_nodes;
2058 	int	new_root, new_gap_cnt;
2059 
2060 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
2061 
2062 	/* Wait for Bus Manager to be determined */
2063 	/* or a Bus Reset to happen */
2064 	mutex_enter(&hal->bus_mgr_node_mutex);
2065 	if (hal->bus_mgr_node == -1)
2066 		cv_wait(&hal->bus_mgr_node_cv, &hal->bus_mgr_node_mutex);
2067 
2068 	/* Check if a BUS RESET has come while we've been waiting */
2069 	mutex_enter(&hal->br_thread_mutex);
2070 	if (hal->br_thread_ev_type & (BR_THR_CFGROM_SCAN | BR_THR_GO_AWAY)) {
2071 
2072 		mutex_exit(&hal->br_thread_mutex);
2073 		mutex_exit(&hal->bus_mgr_node_mutex);
2074 
2075 		return (1);
2076 	}
2077 	mutex_exit(&hal->br_thread_mutex);
2078 
2079 	hal_bus_mgr_node = hal->bus_mgr_node;
2080 	mutex_exit(&hal->bus_mgr_node_mutex);
2081 
2082 	if (s1394_lock_tree(hal) != DDI_SUCCESS) {
2083 		return (1);
2084 	}
2085 	hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
2086 	IRM_node_num = hal->IRM_node;
2087 	number_of_nodes = hal->number_of_nodes;
2088 
2089 	ret = 0;
2090 
2091 	/* If we are the bus_mgr, or there is no bus_mgr and we are */
2092 	/* the IRM, and there is more than 1 node on the bus */
2093 	if ((number_of_nodes > 1) &&
2094 	    ((hal_bus_mgr_node == (int)hal_node_num) ||
2095 		((hal_bus_mgr_node == S1394_INVALID_NODE_NUM) &&
2096 		    (IRM_node_num == (int)hal_node_num)))) {
2097 
2098 		IRM_flags = 0;
2099 
2100 		/* Make sure the root node is cycle master capable */
2101 		if (!s1394_cycle_master_capable(hal)) {
2102 			/* Make the local node root */
2103 			new_root = hal_node_num;
2104 			IRM_flags = IRM_flags | ROOT_HOLDOFF;
2105 
2106 			/* If setting root, then optimize gap_count */
2107 			new_gap_cnt = hal->optimum_gap_count;
2108 			IRM_flags = IRM_flags | GAP_COUNT;
2109 
2110 		} else {
2111 			/* Make sure root's ROOT_HOLDOFF bit is set */
2112 			new_root = (number_of_nodes - 1);
2113 			IRM_flags = IRM_flags | ROOT_HOLDOFF;
2114 		}
2115 		if (hal->gap_count > hal->optimum_gap_count) {
2116 			/* Set the gap_count to optimum */
2117 			new_gap_cnt = hal->optimum_gap_count;
2118 			IRM_flags = IRM_flags | GAP_COUNT;
2119 
2120 		}
2121 
2122 		s1394_unlock_tree(hal);
2123 
2124 		if (IRM_flags) {
2125 			ret = s1394_do_phy_config_pkt(hal, new_root,
2126 			    new_gap_cnt, IRM_flags);
2127 		}
2128 		return (ret);
2129 	}
2130 
2131 	s1394_unlock_tree(hal);
2132 
2133 	return (ret);
2134 }
2135 
2136 /*
2137  * s1394_bus_mgr_timers_stop()
2138  *    Cancels bus manager timeouts
2139  */
2140 /*ARGSUSED*/
2141 static void
2142 s1394_bus_mgr_timers_stop(s1394_hal_t *hal, timeout_id_t *bus_mgr_query_tid,
2143     timeout_id_t *bus_mgr_tid)
2144 {
2145 	/* Cancel the Bus Mgr timeouts (if necessary) */
2146 	if (*bus_mgr_tid != 0) {
2147 		(void) untimeout(*bus_mgr_tid);
2148 		*bus_mgr_tid = 0;
2149 	}
2150 	if (*bus_mgr_query_tid != 0) {
2151 		(void) untimeout(*bus_mgr_query_tid);
2152 		*bus_mgr_query_tid = 0;
2153 	}
2154 }
2155 
2156 /*
2157  * s1394_bus_mgr_timers_start()
2158  *    Starts bus manager timeouts if the hal is IRM capable.
2159  */
2160 static void
2161 s1394_bus_mgr_timers_start(s1394_hal_t *hal, timeout_id_t *bus_mgr_query_tid,
2162     timeout_id_t *bus_mgr_tid)
2163 {
2164 	boolean_t incumbent;
2165 	uint_t	  hal_node_num;
2166 	int	  IRM_node_num;
2167 
2168 	mutex_enter(&hal->topology_tree_mutex);
2169 
2170 	IRM_node_num = hal->IRM_node;
2171 	hal_node_num = hal->node_id;
2172 
2173 	mutex_enter(&hal->bus_mgr_node_mutex);
2174 	incumbent = hal->incumbent_bus_mgr;
2175 	mutex_exit(&hal->bus_mgr_node_mutex);
2176 
2177 	/* If we are IRM capable, then do bus_mgr stuff... */
2178 	if (hal->halinfo.bus_capabilities & IEEE1394_BIB_IRMC_MASK) {
2179 		/*
2180 		 * If we are the IRM, then wait 625ms
2181 		 * before checking BUS_MANAGER_ID register
2182 		 */
2183 		if (IRM_node_num == IEEE1394_NODE_NUM(hal_node_num)) {
2184 
2185 			mutex_exit(&hal->topology_tree_mutex);
2186 
2187 			/* Wait 625ms, then check bus manager */
2188 			*bus_mgr_query_tid = timeout(s1394_become_bus_mgr,
2189 			    hal, drv_usectohz(IEEE1394_BM_IRM_TIMEOUT));
2190 
2191 			mutex_enter(&hal->topology_tree_mutex);
2192 		}
2193 
2194 		/* If there is an IRM on the bus */
2195 		if (IRM_node_num != -1) {
2196 			if ((incumbent == B_TRUE) &&
2197 			    (hal->abdicate_bus_mgr_bit == 0)) {
2198 				mutex_exit(&hal->topology_tree_mutex);
2199 
2200 				/* Try to become bus manager */
2201 				s1394_become_bus_mgr(hal);
2202 
2203 				mutex_enter(&hal->topology_tree_mutex);
2204 			} else {
2205 				hal->abdicate_bus_mgr_bit = 0;
2206 
2207 				mutex_exit(&hal->topology_tree_mutex);
2208 
2209 				/* Wait 125ms, then try to become bus manager */
2210 				*bus_mgr_tid = timeout(s1394_become_bus_mgr,
2211 				    hal, drv_usectohz(
2212 					IEEE1394_BM_INCUMBENT_TIMEOUT));
2213 
2214 				mutex_enter(&hal->topology_tree_mutex);
2215 			}
2216 		} else {
2217 			mutex_enter(&hal->bus_mgr_node_mutex);
2218 			hal->incumbent_bus_mgr = B_FALSE;
2219 			mutex_exit(&hal->bus_mgr_node_mutex);
2220 		}
2221 	}
2222 
2223 	mutex_exit(&hal->topology_tree_mutex);
2224 }
2225 
2226 /*
2227  * s1394_get_maxpayload()
2228  *    is used to determine a device's maximum payload size.  That is to
2229  *    say, the largest packet that can be transmitted or received by the
2230  *    target device given the current topological (speed) constraints
2231  *    and the constraints specified in the local host's and remote device's
2232  *    Config ROM (max_rec).  Caller must hold the topology_tree_mutex and
2233  *    the target_list_rwlock as an RW_READER (at least).
2234  */
2235 /*ARGSUSED*/
2236 void
2237 s1394_get_maxpayload(s1394_target_t *target, uint_t *dev_max_payload,
2238     uint_t *current_max_payload)
2239 {
2240 	s1394_hal_t *hal;
2241 	uint32_t bus_capabilities;
2242 	uint32_t from_node;
2243 	uint32_t to_node;
2244 	uint_t local_max_rec;
2245 	uint_t local_max_blk;
2246 	uint_t max_rec;
2247 	uint_t max_blk;
2248 	uint_t curr_speed;
2249 	uint_t speed_max_blk;
2250 	uint_t temp;
2251 
2252 	/* Find the HAL this target resides on */
2253 	hal = target->on_hal;
2254 
2255 	/* Make sure we're holding the topology_tree_mutex */
2256 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
2257 
2258 	/* Set dev_max_payload to local (HAL's) size */
2259 	bus_capabilities = target->on_hal->halinfo.bus_capabilities;
2260 	local_max_rec = (bus_capabilities & IEEE1394_BIB_MAXREC_MASK) >>
2261 	    IEEE1394_BIB_MAXREC_SHIFT;
2262 	if ((local_max_rec > 0) && (local_max_rec < 14)) {
2263 		local_max_blk = 1 << (local_max_rec + 1);
2264 	} else {
2265 		/* These are either unspecified or reserved */
2266 		local_max_blk = 4;
2267 	}
2268 
2269 	/* Is this target on a node? */
2270 	if ((target->target_state & S1394_TARG_GONE) == 0 &&
2271 	    (target->on_node != NULL)) {
2272 		ASSERT(target->on_node->cfgrom != NULL);
2273 
2274 		bus_capabilities =
2275 		    target->on_node->cfgrom[IEEE1212_NODE_CAP_QUAD];
2276 		max_rec = (bus_capabilities & IEEE1394_BIB_MAXREC_MASK) >>
2277 		    IEEE1394_BIB_MAXREC_SHIFT;
2278 
2279 		if ((max_rec > 0) && (max_rec < 14)) {
2280 			max_blk = 1 << (max_rec + 1);
2281 		} else {
2282 			/* These are either unspecified or reserved */
2283 			max_blk = 4;
2284 		}
2285 		(*dev_max_payload) = max_blk;
2286 
2287 		from_node = IEEE1394_NODE_NUM(target->on_hal->node_id);
2288 		to_node = (target->on_node->node_num);
2289 
2290 		/* Speed is to be filled in from speed map */
2291 		curr_speed = (uint_t)s1394_speed_map_get(target->on_hal,
2292 		    from_node, to_node);
2293 		speed_max_blk = 512 << curr_speed;
2294 		temp = (local_max_blk < max_blk) ? local_max_blk : max_blk;
2295 		(*current_max_payload) = (temp < speed_max_blk) ? temp :
2296 		    speed_max_blk;
2297 	} else {
2298 		/* Set dev_max_payload to local (HAL's) size */
2299 		(*dev_max_payload) = local_max_blk;
2300 		(*current_max_payload) = local_max_blk;
2301 	}
2302 }
2303 
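/*
 * Worked example of the calculation above (illustrative values only): if
 * the local HAL's bus_capabilities report max_rec = 10, its block size is
 * 1 << (10 + 1) = 2048 bytes.  If the remote node's Bus_Info_Block reports
 * max_rec = 8 (512 bytes) and the speed map says the path runs at S400
 * (512 << 2 = 2048 bytes), then *dev_max_payload = 512 and
 * *current_max_payload = min(2048, 512, 2048) = 512.
 */
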
2304 /*
2305  * s1394_cycle_master_capable()
2306  *    is used to determine whether or not the current root node on the
2307  *    1394 bus has its CMC-bit set in its Config ROM.  If not, then it
2308  *    is not capable of being cycle master and a new root node must be
2309  *    selected.
2310  */
2311 static int
2312 s1394_cycle_master_capable(s1394_hal_t *hal)
2313 {
2314 	s1394_node_t	*root;
2315 	int		cycle_master_capable;
2316 	uint_t		hal_node_num;
2317 
2318 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
2319 
2320 	hal_node_num = IEEE1394_NODE_NUM(hal->node_id);
2321 
2322 	/* Get a pointer to the root node */
2323 	root = s1394_topology_tree_get_root_node(hal);
2324 
2325 	/* Ignore if we are already root */
2326 	if (root == &hal->topology_tree[hal_node_num]) {
2327 		return (1);
2328 	}
2329 
2330 	/*
2331 	 * We want to pick a new root if the link is off or we don't have a
2332 	 * valid config rom
2333 	 */
2334 	if (LINK_ACTIVE(root) == B_FALSE || root->cfgrom == NULL ||
2335 	    CFGROM_BIB_READ(root) == 0) {
2336 
2337 		return (0);
2338 	}
2339 
2340 	/* Check the Cycle Master bit in the Bus Info Block */
2341 	cycle_master_capable = root->cfgrom[IEEE1212_NODE_CAP_QUAD] &
2342 	    IEEE1394_BIB_CMC_MASK;
2343 
2344 	if (cycle_master_capable) {
2345 		return (1);
2346 	} else {
2347 		return (0);
2348 	}
2349 }
2350 
2351 /*
2352  * s1394_do_phy_config_pkt()
2353  *    is called by s1394_do_bus_mgr_processing() to setup and send out
2354  *    a PHY configuration packet onto the 1394 bus.  Depending on the
2355  *    values in IRM_flags, the gap_count and root_holdoff bits on the
2356  *    bus will be affected by this packet.
2357  *
2358  *    NOTE: we overload DDI_FAILURE return value to mean jump back to
2359  *    the start of bus reset processing.
2360  */
2361 static int
2362 s1394_do_phy_config_pkt(s1394_hal_t *hal, int new_root, int new_gap_cnt,
2363     uint32_t IRM_flags)
2364 {
2365 	cmd1394_cmd_t	 *cmd;
2366 	s1394_cmd_priv_t *s_priv;
2367 	h1394_cmd_priv_t *h_priv;
2368 	uint32_t	 pkt_data = 0;
2369 	uint32_t	 gap_cnt = 0;
2370 	uint32_t	 root = 0;
2371 	int		 ret, result;
2372 	uint_t		 flags = 0;
2373 
2374 	/* Gap count needs to be optimized */
2375 	if (IRM_flags & GAP_COUNT) {
2376 
2377 		pkt_data = pkt_data | IEEE1394_PHY_CONFIG_T_BIT_MASK;
2378 		gap_cnt = ((uint32_t)new_gap_cnt) <<
2379 		    IEEE1394_PHY_CONFIG_GAP_CNT_SHIFT;
2380 		gap_cnt = gap_cnt & IEEE1394_PHY_CONFIG_GAP_CNT_MASK;
2381 		pkt_data = pkt_data | gap_cnt;
2382 
2383 		(void) HAL_CALL(hal).set_gap_count(hal->halinfo.hal_private,
2384 		    (uint_t)new_gap_cnt);
2385 	}
2386 
2387 	/* Root node needs to be changed */
2388 	if (IRM_flags & ROOT_HOLDOFF) {
2389 
2390 		pkt_data = pkt_data | IEEE1394_PHY_CONFIG_R_BIT_MASK;
2391 		root = ((uint32_t)new_root) <<
2392 		    IEEE1394_PHY_CONFIG_ROOT_HOLD_SHIFT;
2393 		root = root & IEEE1394_PHY_CONFIG_ROOT_HOLD_MASK;
2394 		pkt_data = pkt_data | root;
2395 
2396 		(void) HAL_CALL(hal).set_root_holdoff_bit(
2397 		    hal->halinfo.hal_private);
2398 	}
2399 
2400 
2401 	if (IRM_flags) {
2402 		if (s1394_alloc_cmd(hal, flags, &cmd) != DDI_SUCCESS) {
2403 			return (0);
2404 		}
2405 
2406 		if (s1394_lock_tree(hal) != DDI_SUCCESS) {
2407 			/* lock tree failure indicates a bus gen change */
2408 			(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
2409 			return (1);
2410 		}
2411 
2412 		/* Setup the callback routine */
2413 		cmd->completion_callback  = s1394_phy_config_callback;
2414 		cmd->cmd_callback_arg	  = (void *)(uintptr_t)IRM_flags;
2415 		cmd->bus_generation	  = hal->generation_count;
2416 		cmd->cmd_options	  = CMD1394_OVERRIDE_ADDR;
2417 		cmd->cmd_type		  = CMD1394_ASYNCH_WR_QUAD;
2418 		cmd->cmd_u.q.quadlet_data = pkt_data;
2419 
2420 		/* Get the Services Layer private area */
2421 		s_priv = S1394_GET_CMD_PRIV(cmd);
2422 
2423 		/* Get a pointer to the HAL private struct */
2424 		h_priv = (h1394_cmd_priv_t *)&s_priv->hal_cmd_private;
2425 
2426 		s_priv->sent_by_target	= (s1394_target_t *)NULL;
2427 		s_priv->sent_on_hal	= (s1394_hal_t *)hal;
2428 
2429 		h_priv->bus_generation	= cmd->bus_generation;
2430 
2431 		/* Speed must be IEEE1394_S100 on PHY config packets */
2432 		s_priv->hal_cmd_private.speed = IEEE1394_S100;
2433 
2434 		/* Mark command as being used */
2435 		s_priv->cmd_in_use = B_TRUE;
2436 
2437 		s1394_unlock_tree(hal);
2438 
2439 		/* Put command on the HAL's outstanding request Q */
2440 		s1394_insert_q_asynch_cmd(hal, cmd);
2441 
2442 		ret = HAL_CALL(hal).send_phy_configuration_packet(
2443 		    hal->halinfo.hal_private, (cmd1394_cmd_t *)cmd,
2444 		    (h1394_cmd_priv_t *)&s_priv->hal_cmd_private, &result);
2445 
2446 		if (ret != DDI_SUCCESS) {
2447 			(void) s1394_free_cmd(hal, (cmd1394_cmd_t **)&cmd);
2448 
2449 			return (0);
2450 
2451 		} else {
2452 			/*
2453 			 * There will be a bus reset only if GAP_COUNT changed
2454 			 */
2455 			if (IRM_flags & GAP_COUNT) {
2456 				return (1);
2457 			}
2458 		}
2459 	}
2460 
2461 	return (0);
2462 }
2463 
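/*
 * Illustration of how the quadlet above is composed (example values only):
 * to force node 0 to become root and to set a gap count of 0x3F, the
 * routine would build
 *
 *	pkt_data = IEEE1394_PHY_CONFIG_R_BIT_MASK |
 *	    IEEE1394_PHY_CONFIG_T_BIT_MASK |
 *	    (0 << IEEE1394_PHY_CONFIG_ROOT_HOLD_SHIFT) |
 *	    (0x3F << IEEE1394_PHY_CONFIG_GAP_CNT_SHIFT);
 *
 * Per IEEE 1394 a PHY configuration packet is two quadlets, the second
 * being the bit-wise inverse of the first; that second quadlet is supplied
 * below the Services Layer when the packet goes out on the wire.
 */
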
2464 /*
2465  * s1394_phy_config_callback()
2466  *    is the callback called after the PHY configuration packet has been
2467  *    sent out onto the 1394 bus.  Depending on the values in IRM_flags,
2468  *    (specifically if the gap_count has been changed) this routine may
2469  *    initiate a bus reset.
2470  */
2471 static void
2472 s1394_phy_config_callback(cmd1394_cmd_t *cmd)
2473 {
2474 	s1394_cmd_priv_t *s_priv;
2475 	s1394_hal_t *hal;
2476 	uint32_t IRM_flags;
2477 
2478 	/* Get the Services Layer private area */
2479 	s_priv = S1394_GET_CMD_PRIV(cmd);
2480 
2481 	hal = (s1394_hal_t *)s_priv->sent_on_hal;
2482 
2483 	IRM_flags = (uint32_t)(uintptr_t)cmd->cmd_callback_arg;
2484 
2485 	if (cmd->cmd_result != CMD1394_CMDSUCCESS) {
2486 		(void) s1394_free_cmd(hal, &cmd);
2487 	} else {
2488 		(void) s1394_free_cmd(hal, &cmd);
2489 
2490 		/* Only need a bus reset if we changed GAP_COUNT */
2491 		if (IRM_flags & GAP_COUNT) {
2492 			s1394_initiate_hal_reset(hal, NON_CRITICAL);
2493 		}
2494 	}
2495 }
2496 
2497 /*
2498  * s1394_lock_tree()
2499  *    Attempts to lock the topology tree. Returns DDI_FAILURE if generations
2500  *    changed or if the services layer signals the bus reset thread to go
2501  *    away. Otherwise, returns DDI_SUCCESS.
2502  */
2503 int
2504 s1394_lock_tree(s1394_hal_t *hal)
2505 {
2506 	ASSERT(MUTEX_NOT_HELD(&hal->br_thread_mutex));
2507 	ASSERT(MUTEX_NOT_HELD(&hal->topology_tree_mutex));
2508 
2509 	mutex_enter(&hal->br_thread_mutex);
2510 	ndi_devi_enter(hal->halinfo.dip);
2511 	mutex_enter(&hal->topology_tree_mutex);
2512 
2513 	if ((hal->br_thread_ev_type & BR_THR_GO_AWAY) != 0) {
2514 		mutex_exit(&hal->br_thread_mutex);
2515 		mutex_exit(&hal->topology_tree_mutex);
2516 		ndi_devi_exit(hal->halinfo.dip);
2517 		return (DDI_FAILURE);
2518 	} else if (hal->br_cfgrom_read_gen != hal->generation_count) {
2519 		mutex_exit(&hal->br_thread_mutex);
2520 		mutex_exit(&hal->topology_tree_mutex);
2521 		ndi_devi_exit(hal->halinfo.dip);
2522 		return (DDI_FAILURE);
2523 	}
2524 
2525 	mutex_exit(&hal->br_thread_mutex);
2526 
2527 	return (DDI_SUCCESS);
2528 }
2529 
2530 /*
2531  * s1394_unlock_tree()
2532  *    Unlocks the topology tree
2533  */
2534 void
2535 s1394_unlock_tree(s1394_hal_t *hal)
2536 {
2537 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
2538 	mutex_exit(&hal->topology_tree_mutex);
2539 	ndi_devi_exit(hal->halinfo.dip);
2540 }
2541 
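/*
 * Typical usage of the pair above, mirroring the callers in this file
 * (sketch only; a DDI_FAILURE from s1394_lock_tree() means the generation
 * changed or the bus reset thread was told to go away):
 *
 *	if (s1394_lock_tree(hal) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *	...examine or update hal->topology_tree...
 *	s1394_unlock_tree(hal);
 */
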
2542 /*
2543  * s1394_calc_next_quad()
2544  *    figures out the next quadlet to read. This maintains a stack of
2545  *    directories in the node. When the first quad of a directory (the
2546  *    first directory would be the root directory) is read, it is pushed
2547  *    on this stack. When the directory has been read completely, it is
2548  *    scanned for indirect entries. If an indirect directory entry is
2549  *    found, it is pushed on the stack and that directory is read. When
2550  *    we are done with all entries in the current dir, the directory is
2551  *    popped off the stack. If the stack is empty, we are back at the
2552  *    root directory level and have essentially read the entire directory
2553  *    hierarchy.  Returns 0 if there are more quads to read, else non-zero.
2554  */
2555 static int
2556 s1394_calc_next_quad(s1394_hal_t *hal, s1394_node_t *node, uint32_t quadlet,
2557     uint32_t *nextquadp)
2558 {
2559 	uint32_t data, type, key, value, *ptr;
2560 
2561 	ASSERT(MUTEX_HELD(&hal->topology_tree_mutex));
2562 
2563 	if (((quadlet + 1) >= node->cfgrom_size) ||
2564 	    (CFGROM_SIZE_IS_CRCSIZE(node) == B_TRUE && (quadlet + 1) >=
2565 		node->cfgrom_valid_size)) {
2566 		return (1);
2567 	}
2568 
2569 	if (s1394_turn_off_dir_stack != 0 || CFGROM_DIR_STACK_OFF(node) ==
2570 	    B_TRUE) {
2571 		quadlet++;
2572 		*nextquadp = quadlet;
2573 		return (0);
2574 	}
2575 
2576 	data = node->cfgrom[quadlet];
2577 
2578 	if (quadlet == IEEE1212_ROOT_DIR_QUAD) {
2579 		node->dir_stack_top = -1;
2580 		node->expected_dir_quad = quadlet;
2581 		node->expected_type = IEEE1212_IMMEDIATE_TYPE;
2582 	}
2583 
2584 	CFGROM_TYPE_KEY_VALUE(data, type, key, value);
2585 
2586 	/*
2587 	 * check to make sure we are looking at a dir. If the config rom
2588 	 * is broken, then revert to normal scanning of the config rom
2589 	 */
2590 	if (node->expected_dir_quad == quadlet) {
2591 		if (type != 0 || key != 0) {
2592 			SET_CFGROM_DIR_STACK_OFF(node);
2593 			quadlet = IEEE1212_ROOT_DIR_QUAD;
2594 		} else {
2595 			node->cur_dir_start = quadlet;
2596 			node->cur_dir_size = IEEE1212_DIR_LEN(data);
2597 			node->expected_dir_quad = 0;
2598 			/* get the next quad */
2599 			quadlet++;
2600 		}
2601 	} else {
2602 		/*
2603 		 * If we read all quads in cur dir and the cur dir is not
2604 		 * a leaf, scan for offsets (if the directory's CRC checks
2605 		 * out OK). If we have a directory or a leaf, we save the
2606 		 * current location on the stack and start reading that
2607 		 * directory. So, we will end up with a depth first read of
2608 		 * the entire config rom. If we are done with the current
2609 		 * directory, pop it off the stack and continue the scanning
2610 		 * as appropriate.
2611 		 */
2612 		if (quadlet == node->cur_dir_start + node->cur_dir_size) {
2613 
2614 			int i, top;
2615 			boolean_t done_with_cur_dir = B_FALSE;
2616 
2617 			if (node->expected_type == IEEE1212_LEAF_TYPE) {
2618 				node->expected_type = IEEE1212_IMMEDIATE_TYPE;
2619 				done_with_cur_dir = B_TRUE;
2620 				goto donewithcurdir;
2621 			}
2622 
2623 			ptr = &node->cfgrom[node->cur_dir_start];
2624 			CFGROM_TYPE_KEY_VALUE(*ptr, type, key, value);
2625 
2626 			/*
2627 			 * If CRC for this directory is invalid, turn off
2628 			 * dir stack and start re-reading from root dir.
2629 			 * This wastes the work done thus far, but CRC
2630 			 * errors in directories should be rather rare.
2631 			 * if s1394_crcsz_is_cfgsz is set, then set
2632 			 * cfgrom_valid_size to the len specified as crc len
2633 			 * in quadlet 0.
2634 			 */
2635 			if (s1394_valid_dir(hal, node, key, ptr) == B_FALSE) {
2636 				SET_CFGROM_DIR_STACK_OFF(node);
2637 				if (s1394_crcsz_is_cfgsz != 0) {
2638 					SET_CFGROM_SIZE_IS_CRCSIZE(node);
2639 					node->cfgrom_valid_size =
2640 					    ((node->cfgrom[0] >>
2641 					    IEEE1394_CFG_ROM_CRC_LEN_SHIFT) &
2642 					    IEEE1394_CFG_ROM_CRC_LEN_MASK);
2643 				}
2644 				*nextquadp = IEEE1212_ROOT_DIR_QUAD;
2645 				return (0);
2646 			}
2647 			i = node->cur_dir_start + 1;
2648 		rescan:
2649 			for (done_with_cur_dir = B_FALSE; i <=
2650 			    node->cur_dir_start + node->cur_dir_size; i++) {
2651 				data = node->cfgrom[i];
2652 				CFGROM_TYPE_KEY_VALUE(data, type, key, value);
2653 				/* read leaf type and directory types only */
2654 				if (type == IEEE1212_LEAF_TYPE || type ==
2655 				    IEEE1212_DIRECTORY_TYPE) {
2656 
2657 					/*
2658 					 * push current dir on stack; if the
2659 					 * stack is overflowing, i.e., too many
2660 					 * directory level nestings, turn off
2661 					 * dir stack and fall back to serial
2662 					 * scanning, starting at root dir. This
2663 					 * wastes all the work we have done
2664 					 * thus far, but more than 16 levels
2665 					 * of directories is rather odd...
2666 					 */
2667 					top = ++node->dir_stack_top;
2668 					if (top == S1394_DIR_STACK_SIZE) {
2669 						SET_CFGROM_DIR_STACK_OFF(node);
2670 						*nextquadp =
2671 						    IEEE1212_ROOT_DIR_QUAD;
2672 						return (0);
2673 					}
2674 
2675 					node->dir_stack[top].dir_start =
2676 					    node->cur_dir_start;
2677 					node->dir_stack[top].dir_size =
2678 					    node->cur_dir_size;
2679 					node->dir_stack[top].dir_next_quad =
2680 					    i + 1;
2681 					/* and set the next quadlet to read */
2682 					quadlet = i + value;
2683 					node->expected_dir_quad = quadlet;
2684 					node->expected_type = type;
2685 					break;
2686 				}
2687 			}
2688 
2689 		donewithcurdir:
2690 
2691 			if ((i > node->cur_dir_start + node->cur_dir_size) ||
2692 				done_with_cur_dir == B_TRUE) {
2693 
2694 				/*
2695 				 * all done with cur dir; pop it off the stack
2696 				 */
2697 				if (node->dir_stack_top >= 0) {
2698 					top = node->dir_stack_top--;
2699 					node->cur_dir_start =
2700 					    node->dir_stack[top].dir_start;
2701 					node->cur_dir_size =
2702 					    node->dir_stack[top].dir_size;
2703 					i = node->dir_stack[top].dir_next_quad;
2704 					goto rescan;
2705 				} else {
2706 					/*
2707 					 * if empty stack, we are at the top
2708 					 * level; declare done.
2709 					 */
2710 					return (1);
2711 				}
2712 			}
2713 		} else {
2714 			/* get the next quadlet */
2715 			quadlet++;
2716 		}
2717 	}
2718 	*nextquadp = quadlet;
2719 
2720 	return (0);
2721 }
2722
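/*
 * Example traversal (hypothetical ROM layout): suppose the root directory
 * starts at quadlet 5 with a length of 4, so its entries occupy quadlets
 * 6 through 9, and the entry at quadlet 7 is a directory entry whose value
 * is 6.  The routine above hands back quadlets 5, 6, 7, 8 and 9 in turn;
 * once the whole directory has been read it scans it, pushes the root
 * directory on dir_stack with dir_next_quad = 8, and jumps to quadlet
 * 13 (7 + 6).  When that subdirectory has been read and scanned, the root
 * directory is popped and scanning resumes at quadlet 8.
 */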