1 /*
2  * This file and its contents are supplied under the terms of the
3  * Common Development and Distribution License ("CDDL"), version 1.0.
4  * You may only use this file in accordance with the terms of version
5  * 1.0 of the CDDL.
6  *
7  * A full copy of the text of the CDDL should have accompanied this
8  * source.  A copy of the CDDL is also available via the Internet at
9  * http://www.illumos.org/license/CDDL.
10  */
11 
12 /*
13  * Copyright 2023 Tintri by DDN, Inc. All rights reserved.
14  * Copyright 2021 Racktop Systems.
15  */
16 
17 /*
 * This file contains the startup code used to initialize the HBA for
 * use with the PQI interface.
20  */
21 #include <smartpqi.h>
22 
23 #define	PQI_DEVICE_SIGNATURE			"PQI DREG"
24 #define	PQI_STATUS_IDLE				0x0
25 #define	PQI_DEVICE_STATE_ALL_REGISTERS_READY	0x2
26 
27 typedef struct _func_list_ {
28 	char		*func_name;
29 	boolean_t	(*func)(pqi_state_t *);
30 } func_list_t;
31 
32 static boolean_t pqi_reset_prep(pqi_state_t *);
33 static boolean_t pqi_ctlr_ready(pqi_state_t *);
34 static boolean_t revert_to_sis(pqi_state_t *);
35 static boolean_t pqi_calculate_io_resources(pqi_state_t *);
36 static boolean_t pqi_check_alloc(pqi_state_t *);
37 static boolean_t pqi_wait_for_mode_ready(pqi_state_t *);
38 static boolean_t save_ctrl_mode_pqi(pqi_state_t *);
39 static boolean_t pqi_process_config_table(pqi_state_t *);
40 static boolean_t pqi_alloc_admin_queue(pqi_state_t *);
41 static boolean_t pqi_create_admin_queues(pqi_state_t *);
42 static boolean_t pqi_report_device_capability(pqi_state_t *);
43 static boolean_t pqi_valid_device_capability(pqi_state_t *);
44 static boolean_t pqi_calculate_queue_resources(pqi_state_t *);
45 static boolean_t pqi_alloc_io_resource(pqi_state_t *);
46 static boolean_t pqi_alloc_operation_queues(pqi_state_t *);
47 static boolean_t pqi_init_operational_queues(pqi_state_t *);
48 static boolean_t pqi_init_operational_locks(pqi_state_t *);
49 static boolean_t pqi_create_queues(pqi_state_t *);
50 static boolean_t pqi_change_irq_mode(pqi_state_t *);
51 static boolean_t pqi_start_heartbeat_timer(pqi_state_t *);
52 static boolean_t pqi_enable_events(pqi_state_t *);
53 static boolean_t pqi_get_hba_version(pqi_state_t *);
54 static boolean_t pqi_version_to_hba(pqi_state_t *);
55 static boolean_t pqi_schedule_update_time_worker(pqi_state_t *);
56 static boolean_t pqi_scan_scsi_devices(pqi_state_t *);
57 
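/*
 * Each table below is walked in order by pqi_prep_full() or
 * pqi_reset_ctl(); every entry must return B_TRUE before the next one
 * runs, and the first failure aborts the sequence.
 */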
58 func_list_t startup_funcs[] =
59 {
60 	{ "sis_wait_for_ctrl_ready", sis_wait_for_ctrl_ready },
61 	{ "sis_get_ctrl_props", sis_get_ctrl_props },
62 	{ "sis_get_pqi_capabilities", sis_get_pqi_capabilities },
63 	{ "pqi_calculate_io_resources", pqi_calculate_io_resources },
64 	{ "pqi_check_alloc", pqi_check_alloc },
65 	{ "sis_init_base_struct_addr", sis_init_base_struct_addr },
66 	{ "pqi_wait_for_mode_ready", pqi_wait_for_mode_ready },
67 	{ "save_ctrl_mode_pqi", save_ctrl_mode_pqi },
68 	{ "pqi_process_config_table", pqi_process_config_table },
69 	{ "pqi_alloc_admin_queue", pqi_alloc_admin_queue },
70 	{ "pqi_create_admin_queues", pqi_create_admin_queues },
71 	{ "pqi_report_device_capability", pqi_report_device_capability },
72 	{ "pqi_valid_device_capability", pqi_valid_device_capability },
73 	{ "pqi_calculate_queue_resources", pqi_calculate_queue_resources },
74 	{ "pqi_alloc_io_resource", pqi_alloc_io_resource },
75 	{ "pqi_alloc_operation_queues", pqi_alloc_operation_queues },
76 	{ "pqi_init_operational_queues", pqi_init_operational_queues },
77 	{ "pqi_init_operational_locks", pqi_init_operational_locks },
78 	{ "pqi_create_queues", pqi_create_queues },
79 	{ "pqi_change_irq_mode", pqi_change_irq_mode },
80 	{ "pqi_start_heartbeat_timer", pqi_start_heartbeat_timer },
81 	{ "pqi_enable_events", pqi_enable_events },
82 	{ "pqi_get_hba_version", pqi_get_hba_version },
83 	{ "pqi_version_to_hba", pqi_version_to_hba },
84 	{ "pqi_schedule_update_time_worker", pqi_schedule_update_time_worker },
85 	{ "pqi_scan_scsi_devices", pqi_scan_scsi_devices },
86 	{ NULL, NULL }
87 };
88 
89 func_list_t reset_funcs[] =
90 {
91 	{ "pqi_reset_prep", pqi_reset_prep },
92 	{ "revert_to_sis", revert_to_sis },
93 	{ "pqi_check_firmware", pqi_check_firmware },
94 	{ "sis_wait_for_ctrl_ready", sis_wait_for_ctrl_ready },
95 	{ "sis_get_ctrl_props", sis_get_ctrl_props },
96 	{ "sis_get_pqi_capabilities", sis_get_pqi_capabilities },
97 	{ "pqi_calculate_io_resources", pqi_calculate_io_resources },
98 	{ "pqi_check_alloc", pqi_check_alloc },
99 	{ "sis_init_base_struct_addr", sis_init_base_struct_addr },
100 	{ "pqi_wait_for_mode_ready", pqi_wait_for_mode_ready },
101 	{ "save_ctrl_mode_pqi", save_ctrl_mode_pqi },
102 	{ "pqi_process_config_table", pqi_process_config_table },
103 	{ "pqi_alloc_admin_queue", pqi_alloc_admin_queue },
104 	{ "pqi_create_admin_queues", pqi_create_admin_queues },
105 	{ "pqi_report_device_capability", pqi_report_device_capability },
106 	{ "pqi_valid_device_capability", pqi_valid_device_capability },
107 	{ "pqi_calculate_queue_resources", pqi_calculate_queue_resources },
108 	{ "pqi_alloc_io_resource", pqi_alloc_io_resource },
109 	{ "pqi_alloc_operation_queues", pqi_alloc_operation_queues },
110 	{ "pqi_init_operational_queues", pqi_init_operational_queues },
111 	{ "pqi_create_queues", pqi_create_queues },
112 	{ "pqi_change_irq_mode", pqi_change_irq_mode },
113 	{ "pqi_ctlr_ready", pqi_ctlr_ready },
114 	{ "pqi_start_heartbeat_timer", pqi_start_heartbeat_timer },
115 	{ "pqi_enable_events", pqi_enable_events },
116 	{ "pqi_get_hba_version", pqi_get_hba_version },
117 	{ "pqi_version_to_hba", pqi_version_to_hba },
118 	{ "pqi_schedule_update_time_worker", pqi_schedule_update_time_worker },
119 	{ NULL, NULL }
120 };
121 
122 /* ---- Forward declarations for utility functions ---- */
123 static void bcopy_fromregs(pqi_state_t *s, uint8_t *iomem, uint8_t *dst,
124     uint32_t len);
125 static boolean_t submit_admin_rqst_sync(pqi_state_t *s,
126     pqi_general_admin_request_t *rqst, pqi_general_admin_response_t *rsp);
127 static boolean_t create_event_queue(pqi_state_t *s);
128 static boolean_t create_queue_group(pqi_state_t *s, int idx);
129 static boolean_t submit_raid_rqst_sync(pqi_state_t *s, pqi_iu_header_t *rqst,
130     pqi_raid_error_info_t e_info);
131 static boolean_t identify_controller(pqi_state_t *s,
132     bmic_identify_controller_t *ident);
133 static boolean_t write_host_wellness(pqi_state_t *s, void *buf, size_t len);
134 static boolean_t get_device_list(pqi_state_t *s,
135     report_phys_lun_extended_t **pl, report_log_lun_extended_t **ll);
136 static boolean_t build_raid_path_request(pqi_raid_path_request_t *rqst, int cmd,
137     caddr_t lun, uint32_t len, int vpd_page);
138 static boolean_t identify_physical_device(pqi_state_t *s, pqi_device_t *devp,
139     bmic_identify_physical_device_t *buf);
140 static pqi_device_t *create_phys_dev(pqi_state_t *s,
141     report_phys_lun_extended_entry_t *e);
142 static pqi_device_t *create_logical_dev(pqi_state_t *s,
143     report_log_lun_extended_entry_t *e);
144 static boolean_t is_new_dev(pqi_state_t *s, pqi_device_t *new_dev);
145 static boolean_t revert_to_sis(pqi_state_t *s);
146 static void save_ctrl_mode(pqi_state_t *s, int mode);
147 static boolean_t scsi_common(pqi_state_t *s, pqi_raid_path_request_t *rqst,
148     caddr_t buf, int len);
149 static void update_time(void *v);
150 
151 static int reset_devices = 1;
152 
153 int pqi_max_io_slots = PQI_MAX_IO_SLOTS;
154 
155 static boolean_t
156 pqi_reset_prep(pqi_state_t *s)
157 {
158 	s->s_intr_ready = B_FALSE;
159 	(void) untimeout(s->s_time_of_day);
160 	(void) untimeout(s->s_watchdog);
161 	pqi_free_single(s, s->s_error_dma);
162 	s->s_error_dma = NULL;
163 
164 	pqi_free_single(s, s->s_adminq_dma);
165 	s->s_adminq_dma = NULL;
166 
167 	mutex_enter(&s->s_io_mutex);
168 	pqi_free_io_resource(s);
169 	mutex_exit(&s->s_io_mutex);
170 	return (B_TRUE);
171 }
172 
173 static boolean_t
174 pqi_ctlr_ready(pqi_state_t *s)
175 {
176 	s->s_offline = B_FALSE;
177 	return (B_TRUE);
178 }
179 
180 boolean_t
181 pqi_check_firmware(pqi_state_t *s)
182 {
183 	uint32_t	status;
184 
185 	status = G32(s, sis_firmware_status);
186 	if (status & SIS_CTRL_KERNEL_PANIC)
187 		return (B_FALSE);
188 
189 	if (sis_read_scratch(s) == SIS_MODE)
190 		return (B_TRUE);
191 
192 	if (status & SIS_CTRL_KERNEL_UP) {
193 		sis_write_scratch(s, SIS_MODE);
194 		return (B_TRUE);
195 	} else {
196 		return (revert_to_sis(s));
197 	}
198 }
199 
200 boolean_t
201 pqi_prep_full(pqi_state_t *s)
202 {
203 	func_list_t	*f;
204 
205 	for (f = startup_funcs; f->func_name != NULL; f++)
206 		if (f->func(s) == B_FALSE) {
207 			cmn_err(CE_WARN, "Init failed on %s", f->func_name);
208 			return (B_FALSE);
209 		}
210 
211 	return (B_TRUE);
212 }
213 
214 boolean_t
215 pqi_reset_ctl(pqi_state_t *s)
216 {
217 	func_list_t	*f;
218 
219 	for (f = reset_funcs; f->func_name != NULL; f++)
220 		if (f->func(s) == B_FALSE) {
221 			cmn_err(CE_WARN, "Reset failed on %s", f->func_name);
222 			return (B_FALSE);
223 		}
224 
225 	return (B_TRUE);
}

/*
228  * []----------------------------------------------------------[]
229  * | Startup functions called in sequence to initialize HBA.	|
230  * []----------------------------------------------------------[]
231  */
232 
233 static boolean_t
234 pqi_calculate_io_resources(pqi_state_t *s)
235 {
236 	uint32_t	max_xfer_size;
237 	uint32_t	max_sg_entries;
238 
239 	s->s_max_io_slots = s->s_max_outstanding_requests;
240 
241 	max_xfer_size = min(s->s_max_xfer_size, PQI_MAX_TRANSFER_SIZE);
242 
	/* ---- add 1 in case the buffer is not page aligned ---- */
244 	max_sg_entries = max_xfer_size / PAGESIZE + 1;
245 	max_sg_entries = min(max_sg_entries, s->s_max_sg_entries);
246 	max_xfer_size = (max_sg_entries - 1) * PAGESIZE;
247 
248 	s->s_sg_chain_buf_length = (max_sg_entries * sizeof (pqi_sg_entry_t)) +
249 	    PQI_EXTRA_SGL_MEMORY;
250 
251 	s->s_max_sectors = max_xfer_size / 512;
252 
253 	return (B_TRUE);
254 }
255 
256 static boolean_t
257 pqi_check_alloc(pqi_state_t *s)
258 {
259 	/*
	 * Note that we need to pass a generation count as part of an I/O
	 * request id.  The id is limited to 16 bits, and we reserve 4 bits
	 * for the generation number, so s_max_io_slots must be limited to
	 * at most 12 bits worth of slot indexes.
264 	 */
265 	if (pqi_max_io_slots != 0 && pqi_max_io_slots < s->s_max_io_slots) {
266 		s->s_max_io_slots = pqi_max_io_slots;
267 	}
268 
269 	s->s_error_dma = pqi_alloc_single(s, (s->s_max_io_slots *
270 	    PQI_ERROR_BUFFER_ELEMENT_LENGTH) + SIS_BASE_STRUCT_ALIGNMENT);
271 	if (s->s_error_dma == NULL)
272 		return (B_FALSE);
273 
274 	return (B_TRUE);
275 }
276 
277 #define	WAIT_FOR_FIRMWARE_IN_MSECS (5 * MILLISEC)
278 
279 static boolean_t
280 pqi_wait_for_mode_ready(pqi_state_t *s)
281 {
282 	uint64_t	signature;
283 	int32_t		count = WAIT_FOR_FIRMWARE_IN_MSECS;
284 
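	/*
	 * Wait, in order, for the PQI signature to appear, for the
	 * function and status code to go idle, and for the device to
	 * report all registers ready.  Each wait is bounded by
	 * WAIT_FOR_FIRMWARE_IN_MSECS with a 1ms poll interval.
	 */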
285 	for (;;) {
286 		signature = G64(s, pqi_registers.signature);
287 		if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
288 		    sizeof (signature)) == 0)
289 			break;
290 		if (count-- == 0)
291 			return (B_FALSE);
292 		drv_usecwait(MICROSEC / MILLISEC);
293 	}
294 
295 	count = WAIT_FOR_FIRMWARE_IN_MSECS;
296 	for (;;) {
297 		if (G64(s, pqi_registers.function_and_status_code) ==
298 		    PQI_STATUS_IDLE)
299 			break;
300 		if (count-- == 0)
301 			return (B_FALSE);
302 		drv_usecwait(MICROSEC / MILLISEC);
303 	}
304 
305 	count = WAIT_FOR_FIRMWARE_IN_MSECS;
306 	for (;;) {
307 		if (G32(s, pqi_registers.device_status) ==
308 		    PQI_DEVICE_STATE_ALL_REGISTERS_READY)
309 			break;
310 		if (count-- == 0)
311 			return (B_FALSE);
312 		drv_usecwait(MICROSEC / MILLISEC);
313 	}
314 
315 	return (B_TRUE);
316 }
317 
318 static boolean_t
319 save_ctrl_mode_pqi(pqi_state_t *s)
320 {
321 	save_ctrl_mode(s, PQI_MODE);
322 	return (B_TRUE);
323 }
324 
325 static boolean_t
326 pqi_process_config_table(pqi_state_t *s)
327 {
328 	pqi_config_table_t			*c_table;
329 	pqi_config_table_section_header_t	*section;
330 	uint32_t				section_offset;
331 
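	/*
	 * Copy the config table out of register space and walk its
	 * sections.  The only section handled here is the heartbeat
	 * section, whose counter address (in register space) is saved.
	 */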
332 	c_table = kmem_zalloc(s->s_config_table_len, KM_SLEEP);
333 	bcopy_fromregs(s, (uint8_t *)s->s_reg + s->s_config_table_offset,
334 	    (uint8_t *)c_table, s->s_config_table_len);
335 
336 	section_offset = c_table->first_section_offset;
337 	while (section_offset) {
338 		section = (pqi_config_table_section_header_t *)
339 		    ((caddr_t)c_table + section_offset);
340 		switch (section->section_id) {
341 		case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
342 			/* LINTED E_BAD_PTR_CAST_ALIGN */
343 			s->s_heartbeat_counter = (uint32_t *)
344 			    ((caddr_t)s->s_reg +
345 			    s->s_config_table_offset + section_offset +
346 			    offsetof(struct pqi_config_table_heartbeat,
347 			    heartbeat_counter));
348 			break;
349 		}
350 		section_offset = section->next_section_offset;
351 	}
352 	kmem_free(c_table, s->s_config_table_len);
353 	return (B_TRUE);
354 }
355 
356 static boolean_t
357 pqi_alloc_admin_queue(pqi_state_t *s)
358 {
359 	pqi_admin_queues_t		*aq;
360 	pqi_admin_queues_aligned_t	*aq_aligned;
361 	int				len;
362 
363 	len = sizeof (*aq_aligned) + PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
364 	if ((s->s_adminq_dma = pqi_alloc_single(s, len)) == NULL)
365 		return (B_FALSE);
366 	(void) memset(s->s_adminq_dma->alloc_memory, 0,
367 	    s->s_adminq_dma->len_to_alloc);
368 	(void) ddi_dma_sync(s->s_adminq_dma->handle, 0,
369 	    s->s_adminq_dma->len_to_alloc, DDI_DMA_SYNC_FORDEV);
370 
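	/*
	 * Carve the aligned admin queue structure out of the single DMA
	 * allocation; each bus address below is the DMA base plus the
	 * element's offset from the start of the allocation.
	 */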
371 	aq = &s->s_admin_queues;
372 	aq_aligned = PQIALIGN_TYPED(s->s_adminq_dma->alloc_memory,
373 	    PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, pqi_admin_queues_aligned_t *);
374 	aq->iq_element_array = (caddr_t)&aq_aligned->iq_element_array;
375 	aq->oq_element_array = (caddr_t)&aq_aligned->oq_element_array;
376 	aq->iq_ci = &aq_aligned->iq_ci;
377 	aq->oq_pi = &aq_aligned->oq_pi;
378 
379 	aq->iq_element_array_bus_addr = s->s_adminq_dma->dma_addr +
380 	    ((uintptr_t)aq->iq_element_array -
381 	    (uintptr_t)s->s_adminq_dma->alloc_memory);
382 	aq->oq_element_array_bus_addr = s->s_adminq_dma->dma_addr +
383 	    ((uintptr_t)aq->oq_element_array -
384 	    (uintptr_t)s->s_adminq_dma->alloc_memory);
385 
386 	aq->iq_ci_bus_addr = s->s_adminq_dma->dma_addr +
387 	    ((uintptr_t)aq->iq_ci - (uintptr_t)s->s_adminq_dma->alloc_memory);
388 	aq->oq_pi_bus_addr = s->s_adminq_dma->dma_addr +
389 	    ((uintptr_t)aq->oq_pi - (uintptr_t)s->s_adminq_dma->alloc_memory);
390 	return (B_TRUE);
391 }
392 
393 static boolean_t
394 pqi_create_admin_queues(pqi_state_t *s)
395 {
396 	pqi_admin_queues_t *aq = &s->s_admin_queues;
397 	int			val;
398 	int			status;
399 	int			countdown = 1000;
400 
	aq->iq_pi_copy = 0;
403 	aq->oq_ci_copy = 0;
404 
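	/*
	 * Program the element array and index addresses for the admin
	 * queue pair, then issue PQI_CREATE_ADMIN_QUEUE_PAIR through the
	 * function and status register and poll (up to ~1 second) for
	 * the controller to return to idle.
	 */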
405 	S64(s, pqi_registers.admin_iq_element_array_addr,
406 	    aq->iq_element_array_bus_addr);
407 	S64(s, pqi_registers.admin_oq_element_array_addr,
408 	    aq->oq_element_array_bus_addr);
409 	S64(s, pqi_registers.admin_iq_ci_addr,
410 	    aq->iq_ci_bus_addr);
411 	S64(s, pqi_registers.admin_oq_pi_addr,
412 	    aq->oq_pi_bus_addr);
413 
414 	val = PQI_ADMIN_IQ_NUM_ELEMENTS | PQI_ADMIN_OQ_NUM_ELEMENTS << 8 |
415 	    aq->int_msg_num << 16;
416 	S32(s, pqi_registers.admin_queue_params, val);
417 	S64(s, pqi_registers.function_and_status_code,
418 	    PQI_CREATE_ADMIN_QUEUE_PAIR);
419 
420 	while (countdown-- > 0) {
421 		status = G64(s, pqi_registers.function_and_status_code);
422 		if (status == PQI_STATUS_IDLE)
423 			break;
424 		drv_usecwait(1000);	/* ---- Wait 1ms ---- */
425 	}
	if (countdown < 0)
427 		return (B_FALSE);
428 
429 	/*
430 	 * The offset registers are not initialized to the correct
431 	 * offsets until *after* the create admin queue pair command
432 	 * completes successfully.
433 	 */
434 	aq->iq_pi = (void *)(intptr_t)((intptr_t)s->s_reg +
435 	    PQI_DEVICE_REGISTERS_OFFSET +
436 	    G64(s, pqi_registers.admin_iq_pi_offset));
437 	ASSERT((G64(s, pqi_registers.admin_iq_pi_offset) +
438 	    PQI_DEVICE_REGISTERS_OFFSET) < 0x8000);
439 
440 	aq->oq_ci = (void *)(intptr_t)((intptr_t)s->s_reg +
441 	    PQI_DEVICE_REGISTERS_OFFSET +
442 	    G64(s, pqi_registers.admin_oq_ci_offset));
443 	ASSERT((G64(s, pqi_registers.admin_oq_ci_offset) +
444 	    PQI_DEVICE_REGISTERS_OFFSET) < 0x8000);
445 
446 	return (B_TRUE);
447 }
448 
449 static boolean_t
450 pqi_report_device_capability(pqi_state_t *s)
451 {
452 	pqi_general_admin_request_t	rqst;
453 	pqi_general_admin_response_t	rsp;
454 	pqi_device_capability_t		*cap;
455 	pqi_iu_layer_descriptor_t	*iu_layer;
456 	pqi_dma_overhead_t		*dma;
457 	boolean_t			rval;
458 	pqi_sg_entry_t			*sg;
459 
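	/*
	 * Issue a REPORT DEVICE CAPABILITY admin request into a DMA
	 * buffer and cache the controller's queue and element limits for
	 * use when sizing the operational queues.
	 */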
460 	(void) memset(&rqst, 0, sizeof (rqst));
461 
462 	rqst.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
463 	rqst.header.iu_length = PQI_GENERAL_ADMIN_IU_LENGTH;
464 	rqst.function_code =
465 	    PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
466 	rqst.data.report_device_capability.buffer_length =
467 	    sizeof (*cap);
468 
469 	if ((dma = pqi_alloc_single(s, sizeof (*cap))) == NULL)
470 		return (B_FALSE);
471 
472 	sg = &rqst.data.report_device_capability.sg_descriptor;
473 	sg->sg_addr = dma->dma_addr;
474 	sg->sg_len = dma->len_to_alloc;
475 	sg->sg_flags = CISS_SG_LAST;
476 
477 	rval = submit_admin_rqst_sync(s, &rqst, &rsp);
478 	(void) ddi_dma_sync(dma->handle, 0, 0, DDI_DMA_SYNC_FORCPU);
479 	cap = (pqi_device_capability_t *)dma->alloc_memory;
480 
481 	s->s_max_inbound_queues = cap->max_inbound_queues;
482 	s->s_max_elements_per_iq = cap->max_elements_per_iq;
483 	s->s_max_iq_element_length = cap->max_iq_element_length * 16;
484 	s->s_max_outbound_queues = cap->max_outbound_queues;
485 	s->s_max_elements_per_oq = cap->max_elements_per_oq;
486 	s->s_max_oq_element_length = cap->max_oq_element_length * 16;
487 
488 	iu_layer = &cap->iu_layer_descriptors[PQI_PROTOCOL_SOP];
489 	s->s_max_inbound_iu_length_per_firmware =
490 	    iu_layer->max_inbound_iu_length;
491 	s->s_inbound_spanning_supported = iu_layer->inbound_spanning_supported;
492 	s->s_outbound_spanning_supported =
493 	    iu_layer->outbound_spanning_supported;
494 
495 	pqi_free_single(s, dma);
496 	return (rval);
497 }
498 
499 static boolean_t
500 pqi_valid_device_capability(pqi_state_t *s)
501 {
502 	if (s->s_max_iq_element_length < PQI_OPERATIONAL_IQ_ELEMENT_LENGTH)
503 		return (B_FALSE);
504 	if (s->s_max_oq_element_length < PQI_OPERATIONAL_OQ_ELEMENT_LENGTH)
505 		return (B_FALSE);
506 	if (s->s_max_inbound_iu_length_per_firmware <
507 	    PQI_OPERATIONAL_IQ_ELEMENT_LENGTH)
508 		return (B_FALSE);
509 	/* ---- Controller doesn't support spanning but we need it ---- */
510 	if (!s->s_inbound_spanning_supported)
511 		return (B_FALSE);
512 	/* ---- Controller wants outbound spanning, the driver doesn't ---- */
513 	if (s->s_outbound_spanning_supported)
514 		return (B_FALSE);
515 
516 	return (B_TRUE);
517 }
518 
519 static boolean_t
520 pqi_calculate_queue_resources(pqi_state_t *s)
521 {
522 	int	max_queue_groups;
523 	int	num_queue_groups;
524 	int	num_elements_per_iq;
525 	int	num_elements_per_oq;
526 
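	/*
	 * A reset path gets by with a single queue group; otherwise the
	 * group count is bounded by half the inbound queues, the
	 * outbound queues less one (the event queue needs its own OQ),
	 * the number of CPUs, the interrupt count, and
	 * PQI_MAX_QUEUE_GROUPS.
	 */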
527 	if (reset_devices) {
528 		num_queue_groups = 1;
529 	} else {
530 		max_queue_groups = min(s->s_max_inbound_queues / 2,
531 		    s->s_max_outbound_queues - 1);
532 		max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
533 
534 		num_queue_groups = min(ncpus, s->s_intr_cnt);
535 		num_queue_groups = min(num_queue_groups, max_queue_groups);
536 	}
537 	s->s_num_queue_groups = num_queue_groups;
538 
539 	s->s_max_inbound_iu_length =
540 	    (s->s_max_inbound_iu_length_per_firmware /
541 	    PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
542 	    PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
543 
544 	num_elements_per_iq = s->s_max_inbound_iu_length /
545 	    PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
546 	/* ---- add one because one element in each queue is unusable ---- */
547 	num_elements_per_iq++;
548 
549 	num_elements_per_iq = min(num_elements_per_iq,
550 	    s->s_max_elements_per_iq);
551 
552 	num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
553 	num_elements_per_oq = min(num_elements_per_oq,
554 	    s->s_max_elements_per_oq);
555 
556 	s->s_num_elements_per_iq = num_elements_per_iq;
557 	s->s_num_elements_per_oq = num_elements_per_oq;
558 
559 	s->s_max_sg_per_iu = ((s->s_max_inbound_iu_length -
560 	    PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
561 	    sizeof (struct pqi_sg_entry)) +
562 	    PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
563 	return (B_TRUE);
564 }
565 
566 static boolean_t
567 pqi_alloc_io_resource(pqi_state_t *s)
568 {
569 	pqi_io_request_t	*io;
570 	size_t			sg_chain_len;
571 	int			i;
572 
573 	s->s_io_rqst_pool = kmem_zalloc(s->s_max_io_slots * sizeof (*io),
574 	    KM_SLEEP);
575 
576 	sg_chain_len = s->s_sg_chain_buf_length;
577 	io = s->s_io_rqst_pool;
578 	for (i = 0; i < s->s_max_io_slots; i++) {
579 		io->io_iu = kmem_zalloc(s->s_max_inbound_iu_length, KM_SLEEP);
580 
581 		/*
582 		 * TODO: Don't allocate dma space here. Move this to
583 		 * init_pkt when it's clear the data being transferred
584 		 * will not fit in the four SG slots provided by each
585 		 * command.
586 		 */
587 		io->io_sg_chain_dma = pqi_alloc_single(s, sg_chain_len);
588 		if (io->io_sg_chain_dma == NULL)
589 			goto error_out;
590 
591 		mutex_init(&io->io_lock, NULL, MUTEX_DRIVER, NULL);
592 		io->io_gen = 1;
593 		list_link_init(&io->io_list_node);
594 		io->io_index = (uint16_t)i;
595 
596 		io->io_softc = s;
597 		io++;
598 	}
599 
600 	return (B_TRUE);
601 
error_out:
	/* ---- walk the whole pool; untouched entries are still zeroed ---- */
	io = s->s_io_rqst_pool;
	for (i = 0; i < s->s_max_io_slots; i++, io++) {
604 		if (io->io_iu != NULL) {
605 			kmem_free(io->io_iu, s->s_max_inbound_iu_length);
606 			io->io_iu = NULL;
607 		}
608 		if (io->io_sg_chain_dma != NULL) {
609 			pqi_free_single(s, io->io_sg_chain_dma);
610 			io->io_sg_chain_dma = NULL;
611 		}
612 	}
613 	kmem_free(s->s_io_rqst_pool, s->s_max_io_slots * sizeof (*io));
614 	s->s_io_rqst_pool = NULL;
615 
616 	return (B_FALSE);
617 }
618 
619 static boolean_t
620 pqi_alloc_operation_queues(pqi_state_t *s)
621 {
622 	uint32_t	niq = s->s_num_queue_groups * 2;
623 	uint32_t	noq = s->s_num_queue_groups;
624 	uint32_t	queue_idx = (s->s_num_queue_groups * 3) + 1;
625 	uint32_t	i;
626 	size_t		array_len_iq;
627 	size_t		array_len_oq;
628 	size_t		alloc_len;
629 	caddr_t		aligned_pointer = NULL;
630 	pqi_queue_group_t	*qg;
631 
632 	array_len_iq = PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
633 	    s->s_num_elements_per_iq;
634 	array_len_oq = PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
635 	    s->s_num_elements_per_oq;
636 
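	/*
	 * This is done in two passes: the first walks a NULL-based
	 * pointer through every element array and every pqi_index_t
	 * (two iq_ci and one oq_pi per group, plus the event queue's
	 * array and oq_pi) purely to compute the aligned allocation
	 * size; the second repeats the walk over the real DMA buffer to
	 * assign the addresses.
	 */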
637 	for (i = 0; i < niq; i++) {
638 		aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
639 		    PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, caddr_t);
640 		aligned_pointer += array_len_iq;
641 	}
642 
643 	for (i = 0; i < noq; i++) {
644 		aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
645 		    PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, caddr_t);
646 		aligned_pointer += array_len_oq;
647 	}
648 
649 	aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
650 	    PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, caddr_t);
651 	aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
652 	    PQI_EVENT_OQ_ELEMENT_LENGTH;
653 
654 	for (i = 0; i < queue_idx; i++) {
655 		aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
656 		    PQI_OPERATIONAL_INDEX_ALIGNMENT, caddr_t);
657 		aligned_pointer += sizeof (pqi_index_t);
658 	}
659 
660 	alloc_len = (size_t)aligned_pointer +
661 	    PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT + PQI_EXTRA_SGL_MEMORY;
662 	if ((s->s_queue_dma = pqi_alloc_single(s, alloc_len)) == NULL)
663 		return (B_FALSE);
664 
665 	aligned_pointer = PQIALIGN_TYPED(s->s_queue_dma->alloc_memory,
666 	    PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, caddr_t);
667 	for (i = 0; i < s->s_num_queue_groups; i++) {
668 		qg = &s->s_queue_groups[i];
669 
670 		qg->iq_pi_copy[0] = 0;
671 		qg->iq_pi_copy[1] = 0;
672 		qg->oq_ci_copy = 0;
673 		qg->iq_element_array[RAID_PATH] = aligned_pointer;
674 		qg->iq_element_array_bus_addr[RAID_PATH] =
675 		    s->s_queue_dma->dma_addr +
676 		    ((uintptr_t)aligned_pointer -
677 		    (uintptr_t)s->s_queue_dma->alloc_memory);
678 
679 		aligned_pointer += array_len_iq;
680 		aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
681 		    PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, caddr_t);
682 
683 		qg->iq_element_array[AIO_PATH] = aligned_pointer;
684 		qg->iq_element_array_bus_addr[AIO_PATH] =
685 		    s->s_queue_dma->dma_addr +
686 		    ((uintptr_t)aligned_pointer -
687 		    (uintptr_t)s->s_queue_dma->alloc_memory);
688 
689 		aligned_pointer += array_len_iq;
690 		aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
691 		    PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, caddr_t);
692 	}
693 	for (i = 0; i < s->s_num_queue_groups; i++) {
694 		qg = &s->s_queue_groups[i];
695 
696 		qg->oq_element_array = aligned_pointer;
697 		qg->oq_element_array_bus_addr =
698 		    s->s_queue_dma->dma_addr +
699 		    ((uintptr_t)aligned_pointer -
700 		    (uintptr_t)s->s_queue_dma->alloc_memory);
701 
702 		aligned_pointer += array_len_oq;
703 		aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
704 		    PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT, caddr_t);
705 	}
706 
707 	s->s_event_queue.oq_element_array = aligned_pointer;
708 	s->s_event_queue.oq_element_array_bus_addr =
709 	    s->s_queue_dma->dma_addr +
710 	    ((uintptr_t)aligned_pointer -
711 	    (uintptr_t)s->s_queue_dma->alloc_memory);
712 	aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
713 	    PQI_EVENT_OQ_ELEMENT_LENGTH;
714 
715 	aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
716 	    PQI_OPERATIONAL_INDEX_ALIGNMENT, caddr_t);
717 
718 	for (i = 0; i < s->s_num_queue_groups; i++) {
719 		qg = &s->s_queue_groups[i];
720 
721 		/* LINTED E_BAD_PTR_CAST_ALIGN */
722 		qg->iq_ci[RAID_PATH] = (pqi_index_t *)aligned_pointer;
723 		qg->iq_ci_bus_addr[RAID_PATH] =
724 		    s->s_queue_dma->dma_addr +
725 		    ((uintptr_t)aligned_pointer -
726 		    (uintptr_t)s->s_queue_dma->alloc_memory);
727 
728 		aligned_pointer += sizeof (pqi_index_t);
729 		aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
730 		    PQI_OPERATIONAL_INDEX_ALIGNMENT, caddr_t);
731 
732 		/* LINTED E_BAD_PTR_CAST_ALIGN */
733 		qg->iq_ci[AIO_PATH] = (pqi_index_t *)aligned_pointer;
734 		qg->iq_ci_bus_addr[AIO_PATH] =
735 		    s->s_queue_dma->dma_addr +
736 		    ((uintptr_t)aligned_pointer -
737 		    (uintptr_t)s->s_queue_dma->alloc_memory);
738 
739 		aligned_pointer += sizeof (pqi_index_t);
740 		aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
741 		    PQI_OPERATIONAL_INDEX_ALIGNMENT, caddr_t);
742 
743 		/* LINTED E_BAD_PTR_CAST_ALIGN */
744 		qg->oq_pi = (pqi_index_t *)aligned_pointer;
745 		qg->oq_pi_bus_addr =
746 		    s->s_queue_dma->dma_addr +
747 		    ((uintptr_t)aligned_pointer -
748 		    (uintptr_t)s->s_queue_dma->alloc_memory);
749 
750 		aligned_pointer += sizeof (pqi_index_t);
751 		aligned_pointer = PQIALIGN_TYPED(aligned_pointer,
752 		    PQI_OPERATIONAL_INDEX_ALIGNMENT, caddr_t);
753 	}
754 
755 	/* LINTED E_BAD_PTR_CAST_ALIGN */
756 	s->s_event_queue.oq_pi = (pqi_index_t *)aligned_pointer;
757 	s->s_event_queue.oq_pi_bus_addr =
758 	    s->s_queue_dma->dma_addr +
759 	    ((uintptr_t)aligned_pointer -
760 	    (uintptr_t)s->s_queue_dma->alloc_memory);
761 	ASSERT((uintptr_t)aligned_pointer -
762 	    (uintptr_t)s->s_queue_dma->alloc_memory +
763 	    sizeof (pqi_index_t) <= s->s_queue_dma->len_to_alloc);
764 
765 	return (B_TRUE);
766 }
767 
768 static boolean_t
769 pqi_init_operational_queues(pqi_state_t *s)
770 {
771 	int		i;
772 	uint16_t	iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
773 	uint16_t	oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
774 
775 	for (i = 0; i < s->s_num_queue_groups; i++) {
776 		s->s_queue_groups[i].qg_softc = s;
777 	}
778 	s->s_event_queue.oq_id = oq_id++;
779 	for (i = 0; i < s->s_num_queue_groups; i++) {
780 		s->s_queue_groups[i].iq_id[RAID_PATH] = iq_id++;
781 		s->s_queue_groups[i].iq_id[AIO_PATH] = iq_id++;
782 		s->s_queue_groups[i].oq_id = oq_id++;
783 		s->s_queue_groups[i].qg_active = B_TRUE;
784 	}
785 	s->s_event_queue.int_msg_num = 0;
786 	for (i = 0; i < s->s_num_queue_groups; i++)
787 		s->s_queue_groups[i].int_msg_num = (uint16_t)i;
788 
789 	return (B_TRUE);
790 }
791 
792 static boolean_t
793 pqi_init_operational_locks(pqi_state_t *s)
794 {
795 	int	i;
796 
797 	for (i = 0; i < s->s_num_queue_groups; i++) {
798 		mutex_init(&s->s_queue_groups[i].submit_lock[0], NULL,
799 		    MUTEX_DRIVER, NULL);
800 		mutex_init(&s->s_queue_groups[i].submit_lock[1], NULL,
801 		    MUTEX_DRIVER, NULL);
802 		list_create(&s->s_queue_groups[i].request_list[RAID_PATH],
803 		    sizeof (pqi_io_request_t),
804 		    offsetof(struct pqi_io_request, io_list_node));
805 		list_create(&s->s_queue_groups[i].request_list[AIO_PATH],
806 		    sizeof (pqi_io_request_t),
807 		    offsetof(struct pqi_io_request, io_list_node));
808 	}
809 	return (B_TRUE);
810 }
811 
812 static boolean_t
813 pqi_create_queues(pqi_state_t *s)
814 {
815 	int	i;
816 
817 	if (create_event_queue(s) == B_FALSE)
818 		return (B_FALSE);
819 
820 	for (i = 0; i < s->s_num_queue_groups; i++) {
821 		if (create_queue_group(s, i) == B_FALSE) {
822 			return (B_FALSE);
823 		}
824 	}
825 
826 	return (B_TRUE);
827 }
828 
829 static boolean_t
830 pqi_change_irq_mode(pqi_state_t *s)
831 {
	/* ---- Device is already in MSI-X mode ---- */
833 	s->s_intr_ready = B_TRUE;
834 	return (B_TRUE);
835 }
836 
837 static boolean_t
838 pqi_start_heartbeat_timer(pqi_state_t *s)
839 {
840 	s->s_last_heartbeat_count = 0;
841 	s->s_last_intr_count = 0;
842 
843 	s->s_watchdog = timeout(pqi_watchdog, s, drv_usectohz(WATCHDOG));
844 	return (B_TRUE);
845 }
846 
847 #define	PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
848 	(offsetof(struct pqi_event_config, descriptors) + \
849 	(PQI_MAX_EVENT_DESCRIPTORS * sizeof (pqi_event_descriptor_t)))
850 
851 static boolean_t
852 pqi_enable_events(pqi_state_t *s)
853 {
854 	int			i;
855 	pqi_event_config_t	*ec;
856 	pqi_event_descriptor_t	*desc;
857 	pqi_general_mgmt_rqst_t	rqst;
858 	pqi_dma_overhead_t	*dma;
859 	pqi_sg_entry_t		*sg;
860 	boolean_t		rval = B_FALSE;
861 
862 	(void) memset(&rqst, 0, sizeof (rqst));
863 	dma = pqi_alloc_single(s, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH);
864 	if (dma == NULL)
865 		return (B_FALSE);
866 
867 	rqst.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
868 	rqst.header.iu_length = offsetof(struct pqi_general_management_request,
869 	    data.report_event_configuration.sg_descriptors[1]) -
870 	    PQI_REQUEST_HEADER_LENGTH;
871 	rqst.data.report_event_configuration.buffer_length =
872 	    PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH;
873 	sg = &rqst.data.report_event_configuration.sg_descriptors[0];
874 	sg->sg_addr = dma->dma_addr;
875 	sg->sg_len = dma->len_to_alloc;
876 	sg->sg_flags = CISS_SG_LAST;
877 
878 	if (submit_raid_rqst_sync(s, &rqst.header, NULL) == B_FALSE)
879 		goto error_out;
880 
881 	(void) ddi_dma_sync(dma->handle, 0, 0, DDI_DMA_SYNC_FORCPU);
882 	ec = (pqi_event_config_t *)dma->alloc_memory;
883 	for (i = 0; i < ec->num_event_descriptors; i++) {
884 		desc = &ec->descriptors[i];
885 		if (pqi_supported_event(desc->event_type) == B_TRUE)
886 			desc->oq_id = s->s_event_queue.oq_id;
887 		else
888 			desc->oq_id = 0;
889 	}
890 
891 	rqst.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
892 	rqst.header.iu_length = offsetof(struct pqi_general_management_request,
893 	    data.report_event_configuration.sg_descriptors[1]) -
894 	    PQI_REQUEST_HEADER_LENGTH;
895 	rqst.data.report_event_configuration.buffer_length =
896 	    PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH;
897 	(void) ddi_dma_sync(dma->handle, 0, 0, DDI_DMA_SYNC_FORDEV);
898 
899 	rval = submit_raid_rqst_sync(s, &rqst.header, NULL);
900 
901 error_out:
902 	pqi_free_single(s, dma);
903 	return (rval);
904 }
905 
906 /*
907  * pqi_get_hba_version -- find HBA's version number
908  */
909 static boolean_t
910 pqi_get_hba_version(pqi_state_t *s)
911 {
912 	bmic_identify_controller_t	*ident;
913 	boolean_t			rval = B_FALSE;
914 
915 	ident = kmem_zalloc(sizeof (*ident), KM_SLEEP);
916 	if (identify_controller(s, ident) == B_FALSE)
917 		goto out;
918 	(void) memcpy(s->s_firmware_version, ident->firmware_version,
919 	    sizeof (ident->firmware_version));
920 	s->s_firmware_version[sizeof (ident->firmware_version)] = '\0';
921 	(void) snprintf(s->s_firmware_version + strlen(s->s_firmware_version),
922 	    sizeof (s->s_firmware_version) - strlen(s->s_firmware_version),
923 	    "-%u", ident->firmware_build_number);
924 	rval = B_TRUE;
925 	cmn_err(CE_NOTE, "!smartpqi%d - firmware version: %s",
926 	    s->s_instance, s->s_firmware_version);
927 out:
928 	kmem_free(ident, sizeof (*ident));
929 	return (rval);
930 }
931 
932 /*
933  * pqi_version_to_hba -- send driver version to HBA
934  */
935 static boolean_t
936 pqi_version_to_hba(pqi_state_t *s)
937 {
938 	bmic_host_wellness_driver_version_t	*b;
939 	boolean_t				rval = B_FALSE;
940 
941 	b = kmem_zalloc(sizeof (*b), KM_SLEEP);
942 	b->start_tag[0] = '<';
943 	b->start_tag[1] = 'H';
944 	b->start_tag[2] = 'W';
945 	b->start_tag[3] = '>';
946 	b->drv_tag[0] = 'D';
947 	b->drv_tag[1] = 'V';
948 	b->driver_version_length = sizeof (b->driver_version);
949 	(void) snprintf(b->driver_version, sizeof (b->driver_version),
950 	    "Illumos 1.0");
951 	b->end_tag[0] = 'Z';
952 	b->end_tag[1] = 'Z';
953 
954 	rval = write_host_wellness(s, b, sizeof (*b));
955 	kmem_free(b, sizeof (*b));
956 
957 	return (rval);
958 }
959 
static boolean_t
962 pqi_schedule_update_time_worker(pqi_state_t *s)
963 {
964 	update_time(s);
965 	return (B_TRUE);
966 }
967 
968 static boolean_t
969 pqi_scan_scsi_devices(pqi_state_t *s)
970 {
971 	report_phys_lun_extended_t	*phys_list	= NULL;
972 	report_log_lun_extended_t	*logical_list	= NULL;
973 	boolean_t			rval		= B_FALSE;
974 	int				num_phys	= 0;
975 	int				num_logical	= 0;
976 	int				i;
977 	pqi_device_t			*dev;
978 
979 	if (get_device_list(s, &phys_list, &logical_list) == B_FALSE)
980 		goto error_out;
981 
982 	if (phys_list) {
983 		num_phys = ntohl(phys_list->header.list_length) /
984 		    sizeof (phys_list->lun_entries[0]);
985 	}
986 
987 	if (logical_list) {
988 		num_logical = ntohl(logical_list->header.list_length) /
989 		    sizeof (logical_list->lun_entries[0]);
990 	}
991 
	/*
	 * Look for devices that are no longer present.  The call to
	 * is_new_dev() below marks either the newly created device as
	 * scanned or, when it finds a matching device already in the
	 * list, marks that existing device as scanned.
	 */
998 	mutex_enter(&s->s_mutex);
999 	for (dev = list_head(&s->s_devnodes); dev != NULL;
1000 	    dev = list_next(&s->s_devnodes, dev)) {
1001 		dev->pd_scanned = 0;
1002 	}
1003 	mutex_exit(&s->s_mutex);
1004 
1005 	for (i = 0; i < (num_phys + num_logical); i++) {
1006 		if (i < num_phys) {
1007 			dev = create_phys_dev(s, &phys_list->lun_entries[i]);
1008 		} else {
1009 			dev = create_logical_dev(s,
1010 			    &logical_list->lun_entries[i - num_phys]);
1011 		}
1012 		if (dev != NULL) {
1013 			if (is_new_dev(s, dev) == B_TRUE) {
1014 				list_create(&dev->pd_cmd_list,
1015 				    sizeof (struct pqi_cmd),
1016 				    offsetof(struct pqi_cmd, pc_list));
1017 				mutex_init(&dev->pd_mutex, NULL, MUTEX_DRIVER,
1018 				    NULL);
1019 
1020 				mutex_enter(&s->s_mutex);
1021 				list_insert_tail(&s->s_devnodes, dev);
1022 				mutex_exit(&s->s_mutex);
1023 			} else {
1024 				ddi_devid_free_guid(dev->pd_guid);
1025 				kmem_free(dev, sizeof (*dev));
1026 			}
1027 		}
1028 	}
1029 
1030 	/*
1031 	 * Now look through the list for devices which have disappeared.
1032 	 * Mark them as being offline. During the call to config_one, which
1033 	 * will come next during a hotplug event, those devices will be
1034 	 * offlined to the SCSI subsystem.
1035 	 */
1036 	mutex_enter(&s->s_mutex);
1037 	for (dev = list_head(&s->s_devnodes); dev != NULL;
1038 	    dev = list_next(&s->s_devnodes, dev)) {
1039 		if (dev->pd_scanned)
1040 			dev->pd_online = 1;
1041 		else
1042 			dev->pd_online = 0;
1043 	}
1044 
1045 	mutex_exit(&s->s_mutex);
1046 
1047 	rval = B_TRUE;
1048 
1049 error_out:
1050 
1051 	if (phys_list != NULL)
1052 		kmem_free(phys_list, ntohl(phys_list->header.list_length) +
1053 		    sizeof (report_lun_header_t));
1054 	if (logical_list != NULL)
1055 		kmem_free(logical_list,
1056 		    ntohl(logical_list->header.list_length) +
1057 		    sizeof (report_lun_header_t));
1058 	return (rval);
1059 }
1060 
1061 /*
1062  * []----------------------------------------------------------[]
 * | Entry points used by functions found in other files.	|
1064  * []----------------------------------------------------------[]
1065  */
1066 void
1067 pqi_rescan_devices(pqi_state_t *s)
1068 {
1069 	(void) pqi_scan_scsi_devices(s);
1070 }
1071 
1072 boolean_t
1073 pqi_scsi_inquiry(pqi_state_t *s, pqi_device_t *dev, int vpd,
1074     struct scsi_inquiry *inq, int len)
1075 {
1076 	pqi_raid_path_request_t rqst;
1077 
1078 	if (build_raid_path_request(&rqst, SCMD_INQUIRY,
1079 	    dev->pd_scsi3addr, len, vpd) == B_FALSE)
1080 		return (B_FALSE);
1081 
1082 	return (scsi_common(s, &rqst, (caddr_t)inq, len));
1083 }
1084 
1085 void
1086 pqi_free_io_resource(pqi_state_t *s)
1087 {
1088 	pqi_io_request_t	*io = s->s_io_rqst_pool;
1089 	int			i;
1090 
1091 	if (io == NULL)
1092 		return;
1093 
1094 	for (i = 0; i < s->s_max_io_slots; i++) {
1095 		if (io->io_iu == NULL)
1096 			break;
1097 		kmem_free(io->io_iu, s->s_max_inbound_iu_length);
1098 		io->io_iu = NULL;
1099 		pqi_free_single(s, io->io_sg_chain_dma);
1100 		io->io_sg_chain_dma = NULL;
1101 	}
1102 
1103 	kmem_free(s->s_io_rqst_pool, s->s_max_io_slots * sizeof (*io));
1104 	s->s_io_rqst_pool = NULL;
1105 }
1106 
1107 /*
1108  * []----------------------------------------------------------[]
1109  * | Utility functions for startup code.			|
1110  * []----------------------------------------------------------[]
1111  */
1112 
1113 static boolean_t
1114 scsi_common(pqi_state_t *s, pqi_raid_path_request_t *rqst, caddr_t buf, int len)
1115 {
1116 	pqi_dma_overhead_t	*dma;
1117 	pqi_sg_entry_t		*sg;
1118 	boolean_t		rval = B_FALSE;
1119 
1120 	if ((dma = pqi_alloc_single(s, len)) == NULL)
1121 		return (B_FALSE);
1122 
1123 	sg = &rqst->rp_sglist[0];
1124 	sg->sg_addr = dma->dma_addr;
1125 	sg->sg_len = dma->len_to_alloc;
1126 	sg->sg_flags = CISS_SG_LAST;
1127 
1128 	if (submit_raid_rqst_sync(s, &rqst->header, NULL) == B_FALSE)
1129 		goto out;
1130 
1131 	(void) ddi_dma_sync(dma->handle, 0, 0, DDI_DMA_SYNC_FORCPU);
1132 	(void) memcpy(buf, dma->alloc_memory, len);
1133 	rval = B_TRUE;
1134 out:
1135 	pqi_free_single(s, dma);
1136 	return (rval);
1137 }
1138 
1139 static void
1140 bcopy_fromregs(pqi_state_t *s, uint8_t *iomem, uint8_t *dst, uint32_t len)
1141 {
1142 	int	i;
1143 
1144 	for (i = 0; i < len; i++) {
1145 		*dst++ = ddi_get8(s->s_datap, iomem + i);
1146 	}
1147 }
1148 
1149 static void
1150 submit_admin_request(pqi_state_t *s, pqi_general_admin_request_t *r)
1151 {
1152 	pqi_admin_queues_t	*aq;
1153 	pqi_index_t		iq_pi;
1154 	caddr_t			next_element;
1155 
1156 	aq = &s->s_admin_queues;
1157 	iq_pi = aq->iq_pi_copy;
1158 	next_element = aq->iq_element_array + (iq_pi *
1159 	    PQI_ADMIN_IQ_ELEMENT_LENGTH);
1160 	(void) memcpy(next_element, r, sizeof (*r));
1161 	(void) ddi_dma_sync(s->s_adminq_dma->handle,
1162 	    iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH, sizeof (*r),
1163 	    DDI_DMA_SYNC_FORDEV);
1164 	iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
1165 	aq->iq_pi_copy = iq_pi;
1166 
1167 	ddi_put32(s->s_datap, aq->iq_pi, iq_pi);
1168 }
1169 
1170 static boolean_t
1171 poll_for_admin_response(pqi_state_t *s, pqi_general_admin_response_t *r)
1172 {
1173 	pqi_admin_queues_t	*aq;
1174 	pqi_index_t		oq_pi;
1175 	pqi_index_t		oq_ci;
1176 	int			countdown = 10 * MICROSEC;	/* 10 seconds */
1177 	int			pause_time = 10 * MILLISEC;	/* 10ms */
1178 
1179 	countdown /= pause_time;
1180 	aq = &s->s_admin_queues;
1181 	oq_ci = aq->oq_ci_copy;
1182 
1183 	while (--countdown) {
1184 		oq_pi = ddi_get32(s->s_adminq_dma->acc, aq->oq_pi);
1185 		if (oq_pi != oq_ci)
1186 			break;
1187 		drv_usecwait(pause_time);
1188 	}
1189 	if (countdown == 0)
1190 		return (B_FALSE);
1191 
1192 	(void) ddi_dma_sync(s->s_adminq_dma->handle,
1193 	    oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH, sizeof (*r),
1194 	    DDI_DMA_SYNC_FORCPU);
1195 	(void) memcpy(r, aq->oq_element_array +
1196 	    (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof (*r));
1197 
1198 	aq->oq_ci_copy = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
1199 	ddi_put32(s->s_datap, aq->oq_ci, aq->oq_ci_copy);
1200 
1201 	return (B_TRUE);
1202 }
1203 
1204 static boolean_t
1205 validate_admin_response(pqi_general_admin_response_t *r, uint8_t code)
1206 {
1207 	if (r->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
1208 		return (B_FALSE);
1209 
1210 	if (r->header.iu_length != PQI_GENERAL_ADMIN_IU_LENGTH)
1211 		return (B_FALSE);
1212 
1213 	if (r->function_code != code)
1214 		return (B_FALSE);
1215 
1216 	if (r->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
1217 		return (B_FALSE);
1218 
1219 	return (B_TRUE);
1220 }
1221 
1222 static boolean_t
1223 submit_admin_rqst_sync(pqi_state_t *s,
1224     pqi_general_admin_request_t *rqst, pqi_general_admin_response_t *rsp)
1225 {
1226 	boolean_t	rval;
1227 
1228 	submit_admin_request(s, rqst);
1229 	rval = poll_for_admin_response(s, rsp);
1230 	if (rval == B_TRUE) {
1231 		rval = validate_admin_response(rsp, rqst->function_code);
1232 		if (rval == B_FALSE) {
1233 			pqi_show_dev_state(s);
1234 		}
1235 	}
1236 	return (rval);
1237 }
1238 
1239 static boolean_t
1240 create_event_queue(pqi_state_t *s)
1241 {
1242 	pqi_event_queue_t		*eq;
1243 	pqi_general_admin_request_t	request;
1244 	pqi_general_admin_response_t	response;
1245 
1246 	eq = &s->s_event_queue;
1247 
	/*
	 * Create the OQ (Outbound Queue, device to host) that is
	 * dedicated to event notifications.
	 */
1252 	(void) memset(&request, 0, sizeof (request));
1253 	request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
1254 	request.header.iu_length = PQI_GENERAL_ADMIN_IU_LENGTH;
1255 	request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
1256 	request.data.create_operational_oq.queue_id = eq->oq_id;
1257 	request.data.create_operational_oq.element_array_addr =
1258 	    eq->oq_element_array_bus_addr;
1259 	request.data.create_operational_oq.pi_addr = eq->oq_pi_bus_addr;
1260 	request.data.create_operational_oq.num_elements =
1261 	    PQI_NUM_EVENT_QUEUE_ELEMENTS;
1262 	request.data.create_operational_oq.element_length =
1263 	    PQI_EVENT_OQ_ELEMENT_LENGTH / 16;
1264 	request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
1265 	request.data.create_operational_oq.int_msg_num = eq->int_msg_num;
1266 
1267 	if (submit_admin_rqst_sync(s, &request, &response) == B_FALSE)
1268 		return (B_FALSE);
1269 
1270 	eq->oq_ci = (uint32_t *)(intptr_t)((uint64_t)(intptr_t)s->s_reg +
1271 	    PQI_DEVICE_REGISTERS_OFFSET +
1272 	    response.data.create_operational_oq.oq_ci_offset);
1273 
1274 	return (B_TRUE);
1275 }
1276 
1277 static boolean_t
1278 create_queue_group(pqi_state_t *s, int idx)
1279 {
1280 	pqi_queue_group_t		*qg;
1281 	pqi_general_admin_request_t	rqst;
1282 	pqi_general_admin_response_t	rsp;
1283 
1284 	qg = &s->s_queue_groups[idx];
1285 
1286 	/* ---- Create inbound queue for RAID path (host to device) ---- */
1287 	(void) memset(&rqst, 0, sizeof (rqst));
1288 	rqst.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
1289 	rqst.header.iu_length = PQI_GENERAL_ADMIN_IU_LENGTH;
1290 	rqst.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
1291 	rqst.data.create_operational_iq.queue_id = qg->iq_id[RAID_PATH];
1292 	rqst.data.create_operational_iq.element_array_addr =
1293 	    qg->iq_element_array_bus_addr[RAID_PATH];
1294 	rqst.data.create_operational_iq.ci_addr =
1295 	    qg->iq_ci_bus_addr[RAID_PATH];
1296 	rqst.data.create_operational_iq.num_elements =
1297 	    s->s_num_elements_per_iq;
1298 	rqst.data.create_operational_iq.element_length =
1299 	    PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16;
1300 	rqst.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
1301 
1302 	if (submit_admin_rqst_sync(s, &rqst, &rsp) == B_FALSE)
1303 		return (B_FALSE);
1304 	qg->iq_pi[RAID_PATH] =
1305 	    (uint32_t *)(intptr_t)((uint64_t)(intptr_t)s->s_reg +
1306 	    PQI_DEVICE_REGISTERS_OFFSET +
1307 	    rsp.data.create_operational_iq.iq_pi_offset);
1308 
1309 	/* ---- Create inbound queue for Advanced I/O path. ---- */
1310 	(void) memset(&rqst, 0, sizeof (rqst));
1311 	rqst.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
1312 	rqst.header.iu_length = PQI_GENERAL_ADMIN_IU_LENGTH;
1313 	rqst.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
1314 	rqst.data.create_operational_iq.queue_id =
1315 	    qg->iq_id[AIO_PATH];
1316 	rqst.data.create_operational_iq.element_array_addr =
1317 	    qg->iq_element_array_bus_addr[AIO_PATH];
1318 	rqst.data.create_operational_iq.ci_addr =
1319 	    qg->iq_ci_bus_addr[AIO_PATH];
1320 	rqst.data.create_operational_iq.num_elements =
1321 	    s->s_num_elements_per_iq;
1322 	rqst.data.create_operational_iq.element_length =
1323 	    PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16;
1324 	rqst.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
1325 
1326 	if (submit_admin_rqst_sync(s, &rqst, &rsp) == B_FALSE)
1327 		return (B_FALSE);
1328 
1329 	qg->iq_pi[AIO_PATH] =
1330 	    (uint32_t *)(intptr_t)((uint64_t)(intptr_t)s->s_reg +
1331 	    PQI_DEVICE_REGISTERS_OFFSET +
1332 	    rsp.data.create_operational_iq.iq_pi_offset);
1333 
1334 	/* ---- Change second queue to be AIO ---- */
1335 	(void) memset(&rqst, 0, sizeof (rqst));
1336 	rqst.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
1337 	rqst.header.iu_length =	PQI_GENERAL_ADMIN_IU_LENGTH;
1338 	rqst.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
1339 	rqst.data.change_operational_iq_properties.queue_id =
1340 	    qg->iq_id[AIO_PATH];
	rqst.data.change_operational_iq_properties.vendor_specific =
	    PQI_IQ_PROPERTY_IS_AIO_QUEUE;
1343 
1344 	if (submit_admin_rqst_sync(s, &rqst, &rsp) == B_FALSE)
1345 		return (B_FALSE);
1346 
1347 	/* ---- Create outbound queue (device to host) ---- */
1348 	(void) memset(&rqst, 0, sizeof (rqst));
1349 	rqst.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
1350 	rqst.header.iu_length = PQI_GENERAL_ADMIN_IU_LENGTH;
1351 	rqst.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
1352 	rqst.data.create_operational_oq.queue_id = qg->oq_id;
1353 	rqst.data.create_operational_oq.element_array_addr =
1354 	    qg->oq_element_array_bus_addr;
1355 	rqst.data.create_operational_oq.pi_addr = qg->oq_pi_bus_addr;
1356 	rqst.data.create_operational_oq.num_elements =
1357 	    s->s_num_elements_per_oq;
1358 	rqst.data.create_operational_oq.element_length =
1359 	    PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16;
1360 	rqst.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
1361 	rqst.data.create_operational_oq.int_msg_num = qg->int_msg_num;
1362 
1363 	if (submit_admin_rqst_sync(s, &rqst, &rsp) == B_FALSE)
1364 		return (B_FALSE);
1365 	qg->oq_ci = (uint32_t *)(intptr_t)((uint64_t)(intptr_t)s->s_reg +
1366 	    PQI_DEVICE_REGISTERS_OFFSET +
1367 	    rsp.data.create_operational_oq.oq_ci_offset);
1368 
1369 	return (B_TRUE);
1370 }
1371 
1372 static void
1373 raid_sync_complete(pqi_io_request_t *io __unused, void *ctx)
1374 {
1375 	ksema_t *s = (ksema_t *)ctx;
1376 
1377 	sema_v(s);
1378 }
1379 
1380 static boolean_t
1381 submit_raid_sync_with_io(pqi_state_t *s, pqi_io_request_t *io)
1382 {
1383 	ksema_t	sema;
1384 
1385 	sema_init(&sema, 0, NULL, SEMA_DRIVER, NULL);
1386 
1387 	io->io_cb = raid_sync_complete;
1388 	io->io_context = &sema;
1389 
1390 	pqi_start_io(s, &s->s_queue_groups[PQI_DEFAULT_QUEUE_GROUP],
1391 	    RAID_PATH, io);
1392 	sema_p(&sema);
1393 
1394 	switch (io->io_status) {
1395 		case PQI_DATA_IN_OUT_GOOD:
1396 		case PQI_DATA_IN_OUT_UNDERFLOW:
1397 			return (B_TRUE);
1398 		default:
1399 			return (B_FALSE);
1400 	}
1401 }
1402 
1403 static boolean_t
1404 submit_raid_rqst_sync(pqi_state_t *s, pqi_iu_header_t *rqst,
1405     pqi_raid_error_info_t e_info __unused)
1406 {
1407 	pqi_io_request_t	*io;
1408 	size_t			len;
	boolean_t		rval = B_FALSE;	/* default to error case */
1410 	struct pqi_cmd		*c;
1411 
1412 	if ((io = pqi_alloc_io(s)) == NULL)
1413 		return (B_FALSE);
1414 
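	/*
	 * Wrap the request in a throwaway pqi_cmd so the normal command
	 * accounting (PQI_CMD_QUEUE/PQI_CMD_CMPLT) applies; the request
	 * id encodes the io slot index and generation via
	 * PQI_MAKE_REQID.
	 */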
1415 	c = kmem_zalloc(sizeof (*c), KM_SLEEP);
1416 
1417 	mutex_init(&c->pc_mutex, NULL, MUTEX_DRIVER, NULL);
1418 	c->pc_io_rqst = io;
1419 	c->pc_device = &s->s_special_device;
1420 	c->pc_softc = s;
1421 	io->io_cmd = c;
1422 	(void) pqi_cmd_action(c, PQI_CMD_QUEUE);
1423 
1424 	((pqi_raid_path_request_t *)rqst)->rp_id = PQI_MAKE_REQID(io->io_index,
1425 	    io->io_gen);
1426 	if (rqst->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
1427 		((pqi_raid_path_request_t *)rqst)->rp_error_index =
1428 		    io->io_index;
1429 	len = rqst->iu_length + PQI_REQUEST_HEADER_LENGTH;
1430 	(void) memcpy(io->io_iu, rqst, len);
1431 
1432 	if (submit_raid_sync_with_io(s, io) == B_TRUE)
1433 		rval = B_TRUE;
1434 
1435 	(void) pqi_cmd_action(c, PQI_CMD_CMPLT);
1436 	mutex_destroy(&c->pc_mutex);
1437 	kmem_free(c, sizeof (*c));
1438 
1439 	return (rval);
1440 }
1441 
1442 static boolean_t
1443 build_raid_path_request(pqi_raid_path_request_t *rqst,
1444     int cmd, caddr_t lun, uint32_t len, int vpd_page)
1445 {
1446 	uint8_t		*cdb;
1447 
1448 	(void) memset(rqst, 0, sizeof (*rqst));
1449 	rqst->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
1450 	rqst->header.iu_length = offsetof(struct pqi_raid_path_request,
1451 	    rp_sglist[1]) - PQI_REQUEST_HEADER_LENGTH;
1452 	rqst->rp_data_len = len;
1453 	(void) memcpy(rqst->rp_lun, lun, sizeof (rqst->rp_lun));
1454 	rqst->rp_task_attr = SOP_TASK_ATTRIBUTE_SIMPLE;
1455 	rqst->rp_additional_cdb = SOP_ADDITIONAL_CDB_BYTES_0;
1456 
1457 	cdb = rqst->rp_cdb;
1458 	switch (cmd) {
1459 	case SCMD_READ_CAPACITY:
1460 		rqst->rp_data_dir = (uint8_t)SOP_READ_FLAG;
1461 		cdb[0] = (uint8_t)cmd;
1462 		break;
1463 
1464 	case SCMD_READ:
1465 		rqst->rp_data_dir = (uint8_t)SOP_READ_FLAG;
1466 		cdb[0] = (uint8_t)cmd;
1467 		cdb[2] = (uint8_t)(vpd_page >> 8);
1468 		cdb[3] = (uint8_t)vpd_page;
1469 		cdb[4] = len >> 9;
1470 		break;
1471 
1472 	case SCMD_MODE_SENSE:
1473 		rqst->rp_data_dir = (uint8_t)SOP_READ_FLAG;
1474 		cdb[0] = (uint8_t)cmd;
1475 		cdb[1] = 0;
1476 		cdb[2] = (uint8_t)vpd_page;
1477 		cdb[4] = (uint8_t)len;
1478 		break;
1479 
1480 	case SCMD_INQUIRY:
1481 		rqst->rp_data_dir = SOP_READ_FLAG;
1482 		cdb[0] = (uint8_t)cmd;
1483 		if (vpd_page & VPD_PAGE) {
1484 			cdb[1] = 0x1;
1485 			cdb[2] = (uint8_t)vpd_page;
1486 		}
1487 		cdb[4] = (uint8_t)len;
1488 		break;
1489 
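	/* ---- BMIC commands ride inside BMIC_READ/BMIC_WRITE CDBs ---- */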
1490 	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
1491 	case BMIC_IDENTIFY_CONTROLLER:
1492 		rqst->rp_data_dir = SOP_READ_FLAG;
1493 		cdb[0] = BMIC_READ;
1494 		cdb[6] = (uint8_t)cmd;
1495 		cdb[7] = (uint8_t)(len >> 8);
1496 		cdb[8] = (uint8_t)len;
1497 		break;
1498 
1499 	case BMIC_WRITE_HOST_WELLNESS:
1500 		rqst->rp_data_dir = SOP_WRITE_FLAG;
1501 		cdb[0] = BMIC_WRITE;
1502 		cdb[6] = (uint8_t)cmd;
1503 		cdb[7] = (uint8_t)(len >> 8);
1504 		cdb[8] = (uint8_t)len;
1505 		break;
1506 
1507 	case CISS_REPORT_LOG:
1508 	case CISS_REPORT_PHYS:
1509 		rqst->rp_data_dir = SOP_READ_FLAG;
1510 		cdb[0] = (uint8_t)cmd;
1511 		if (cmd == CISS_REPORT_PHYS)
1512 			cdb[1] = CISS_REPORT_PHYS_EXTENDED;
1513 		else
1514 			cdb[1] = CISS_REPORT_LOG_EXTENDED;
1515 		cdb[6] = (uint8_t)(len >> 24);
1516 		cdb[7] = (uint8_t)(len >> 16);
1517 		cdb[8] = (uint8_t)(len >> 8);
1518 		cdb[9] = (uint8_t)len;
1519 		break;
1520 
1521 	default:
1522 		ASSERT(0);
1523 		break;
1524 	}
1525 
1526 	return (B_TRUE);
1527 }
1528 
1529 static boolean_t
1530 identify_physical_device(pqi_state_t *s, pqi_device_t *devp,
1531     bmic_identify_physical_device_t *buf)
1532 {
1533 	pqi_dma_overhead_t	*dma;
1534 	pqi_raid_path_request_t	rqst;
1535 	boolean_t		rval = B_FALSE;
1536 	uint16_t		idx;
1537 
1538 	if ((dma = pqi_alloc_single(s, sizeof (*buf))) == NULL)
1539 		return (B_FALSE);
1540 
1541 	if (build_raid_path_request(&rqst, BMIC_IDENTIFY_PHYSICAL_DEVICE,
1542 	    RAID_CTLR_LUNID, sizeof (*buf), 0) == B_FALSE)
1543 		goto out;
1544 
1545 	idx = CISS_GET_DRIVE_NUMBER(devp->pd_scsi3addr);
1546 	rqst.rp_cdb[2] = (uint8_t)idx;
1547 	rqst.rp_cdb[9] = (uint8_t)(idx >> 8);
1548 
1549 	rqst.rp_sglist[0].sg_addr = dma->dma_addr;
1550 	rqst.rp_sglist[0].sg_len = dma->len_to_alloc;
1551 	rqst.rp_sglist[0].sg_flags = CISS_SG_LAST;
1552 
1553 	if (submit_raid_rqst_sync(s, &rqst.header, NULL) == B_FALSE)
1554 		goto out;
1555 
1556 	(void) ddi_dma_sync(dma->handle, 0, 0, DDI_DMA_SYNC_FORCPU);
1557 	(void) memcpy(buf, dma->alloc_memory, sizeof (*buf));
1558 	rval = B_TRUE;
1559 out:
1560 	pqi_free_single(s, dma);
1561 	return (rval);
1562 }
1563 
1564 static boolean_t
1565 identify_controller(pqi_state_t *s, bmic_identify_controller_t *ident)
1566 {
1567 	pqi_raid_path_request_t	rqst;
1568 	pqi_dma_overhead_t	*dma;
1569 	boolean_t		rval = B_FALSE;
1570 
1571 	if ((dma = pqi_alloc_single(s, sizeof (*ident))) == NULL)
1572 		return (B_FALSE);
1573 
1574 	if (build_raid_path_request(&rqst, BMIC_IDENTIFY_CONTROLLER,
1575 	    RAID_CTLR_LUNID, sizeof (*ident), 0) == B_FALSE)
1576 		goto out;
1577 
1578 	rqst.rp_sglist[0].sg_addr = dma->dma_addr;
1579 	rqst.rp_sglist[0].sg_len = dma->len_to_alloc;
1580 	rqst.rp_sglist[0].sg_flags = CISS_SG_LAST;
1581 
1582 	if (submit_raid_rqst_sync(s, &rqst.header, NULL) == B_FALSE)
1583 		goto out;
1584 
1585 	(void) ddi_dma_sync(dma->handle, 0, 0, DDI_DMA_SYNC_FORCPU);
1586 	(void) memcpy(ident, dma->alloc_memory, sizeof (*ident));
1587 	rval = B_TRUE;
1588 out:
1589 	pqi_free_single(s, dma);
1590 	return (rval);
1591 }
1592 
1593 static boolean_t
1594 write_host_wellness(pqi_state_t *s, void *buf, size_t len)
1595 {
1596 	pqi_dma_overhead_t	*dma;
1597 	boolean_t		rval = B_FALSE;
1598 	pqi_raid_path_request_t	rqst;
1599 
1600 	if ((dma = pqi_alloc_single(s, len)) == NULL)
1601 		return (B_FALSE);
1602 	if (build_raid_path_request(&rqst, BMIC_WRITE_HOST_WELLNESS,
1603 	    RAID_CTLR_LUNID, len, 0) == B_FALSE)
1604 		goto out;
1605 
1606 	(void) memcpy(dma->alloc_memory, buf, dma->len_to_alloc);
1607 	rqst.rp_sglist[0].sg_addr = dma->dma_addr;
1608 	rqst.rp_sglist[0].sg_len = dma->len_to_alloc;
1609 	rqst.rp_sglist[0].sg_flags = CISS_SG_LAST;
1610 
1611 	rval = submit_raid_rqst_sync(s, &rqst.header, NULL);
1612 out:
1613 	pqi_free_single(s, dma);
1614 	return (rval);
1615 }
1616 
1617 static boolean_t
1618 report_luns(pqi_state_t *s, int cmd, void *data, size_t len)
1619 {
1620 	pqi_dma_overhead_t	*dma;
1621 	boolean_t		rval = B_FALSE;
1622 	pqi_raid_path_request_t	rqst;
1623 
1624 	if ((dma = pqi_alloc_single(s, len)) == NULL)
1625 		return (B_FALSE);
1626 	if (build_raid_path_request(&rqst, cmd, RAID_CTLR_LUNID,
1627 	    len, 0) == B_FALSE)
1628 		goto error_out;
1629 
1630 	rqst.rp_sglist[0].sg_addr = dma->dma_addr;
1631 	rqst.rp_sglist[0].sg_len = dma->len_to_alloc;
1632 	rqst.rp_sglist[0].sg_flags = CISS_SG_LAST;
1633 
1634 	if (submit_raid_rqst_sync(s, &rqst.header, NULL) == B_FALSE)
1635 		goto error_out;
1636 
1637 	(void) ddi_dma_sync(dma->handle, 0, 0, DDI_DMA_SYNC_FORCPU);
1638 	(void) memcpy(data, dma->alloc_memory, len);
1639 	rval = B_TRUE;
1640 
1641 error_out:
1642 	pqi_free_single(s, dma);
1643 	return (rval);
1644 }
1645 
1646 static boolean_t
1647 report_luns_by_cmd(pqi_state_t *s, int cmd, void **buf)
1648 {
1649 	void		*data		= NULL;
1650 	size_t		data_len	= 0;
1651 	size_t		new_data_len;
1652 	uint32_t	new_list_len	= 0;
1653 	uint32_t	list_len	= 0;
1654 	boolean_t	rval		= B_FALSE;
1655 
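	/*
	 * Start with a buffer big enough for just the header, read the
	 * reported list length, then reallocate and reissue until the
	 * returned length stops growing between calls.
	 */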
1656 	new_data_len = sizeof (report_lun_header_t);
1657 	do {
1658 		if (data != NULL) {
1659 			kmem_free(data, data_len);
1660 		}
1661 		data_len = new_data_len;
1662 		data = kmem_zalloc(data_len, KM_SLEEP);
1663 		list_len = new_list_len;
1664 		if (report_luns(s, cmd, data, data_len) == B_FALSE)
1665 			goto error_out;
1666 		new_list_len =
1667 		    ntohl(((report_lun_header_t *)data)->list_length);
1668 		new_data_len = sizeof (report_lun_header_t) +
1669 		    new_list_len;
1670 	} while (new_list_len > list_len);
1671 	rval = B_TRUE;
1672 
1673 error_out:
1674 	if (rval == B_FALSE) {
1675 		kmem_free(data, data_len);
1676 		data = NULL;
1677 	}
1678 	*buf = data;
1679 	return (rval);
1680 }
1681 
1682 static inline boolean_t
1683 report_phys_luns(pqi_state_t *s, void **v)
1684 {
1685 	return (report_luns_by_cmd(s, CISS_REPORT_PHYS, v));
1686 }
1687 
1688 static inline boolean_t
1689 report_logical_luns(pqi_state_t *s, void **v)
1690 {
1691 	return (report_luns_by_cmd(s, CISS_REPORT_LOG, v));
1692 }
1693 
1694 static boolean_t
1695 get_device_list(pqi_state_t *s, report_phys_lun_extended_t **pl,
1696     report_log_lun_extended_t **ll)
1697 {
1698 	report_log_lun_extended_t	*log_data;
1699 	report_log_lun_extended_t	*internal_log;
1700 	size_t				list_len;
1701 	size_t				data_len;
1702 	report_lun_header_t		header;
1703 
1704 	if (report_phys_luns(s, (void **)pl) == B_FALSE)
1705 		return (B_FALSE);
1706 
1707 	if (report_logical_luns(s, (void **)ll) == B_FALSE)
1708 		return (B_FALSE);
1709 
1710 	log_data = *ll;
1711 	if (log_data) {
1712 		list_len = ntohl(log_data->header.list_length);
1713 	} else {
1714 		(void) memset(&header, 0, sizeof (header));
1715 		log_data = (report_log_lun_extended_t *)&header;
1716 		list_len = 0;
1717 	}
1718 
1719 	data_len = sizeof (header) + list_len;
	/*
	 * Add the controller to the logical LUN list as an empty device.
	 */
1723 	internal_log = kmem_zalloc(data_len +
1724 	    sizeof (report_log_lun_extended_entry_t), KM_SLEEP);
1725 	(void) memcpy(internal_log, log_data, data_len);
1726 	internal_log->header.list_length = htonl(list_len +
1727 	    sizeof (report_log_lun_extended_entry_t));
1728 
1729 	if (*ll != NULL)
1730 		kmem_free(*ll, sizeof (report_lun_header_t) +
1731 		    ntohl((*ll)->header.list_length));
1732 	*ll = internal_log;
1733 	return (B_TRUE);
1734 }
1735 
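/*
 * get_device_info -- cache basic INQUIRY data for a device.
 *
 * A standard INQUIRY is issued and the peripheral device type, vendor, and
 * product id are stashed in the pqi_device_t.
 */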
1736 static boolean_t
1737 get_device_info(pqi_state_t *s, pqi_device_t *dev)
1738 {
1739 	boolean_t		rval = B_FALSE;
1740 	struct scsi_inquiry	*inq;
1741 
1742 	inq = kmem_zalloc(sizeof (*inq), KM_SLEEP);
1743 	if (pqi_scsi_inquiry(s, dev, 0, inq, sizeof (*inq)) == B_FALSE)
1744 		goto out;
1745 
1746 	dev->pd_devtype = inq->inq_dtype & 0x1f;
1747 	(void) memcpy(dev->pd_vendor, inq->inq_vid, sizeof (dev->pd_vendor));
1748 	(void) memcpy(dev->pd_model, inq->inq_pid, sizeof (dev->pd_model));
1749 
1750 	rval = B_TRUE;
1751 out:
1752 	kmem_free(inq, sizeof (*inq));
1753 	return (rval);
1754 }
1755 
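/*
 * is_supported_dev -- check whether the driver knows how to use a device.
 *
 * Disks, ZBC devices, sequential devices, and enclosures are supported.
 * Array controllers are only accepted when addressed as the RAID
 * controller LUN.
 */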
1756 static boolean_t
1757 is_supported_dev(pqi_state_t *s, pqi_device_t *dev)
1758 {
1759 	boolean_t	rval = B_FALSE;
1760 
1761 	switch (dev->pd_devtype) {
1762 	case DTYPE_DIRECT:
1763 	case TYPE_ZBC:
1764 	case DTYPE_SEQUENTIAL:
1765 	case DTYPE_ESI:
1766 		rval = B_TRUE;
1767 		break;
1768 	case DTYPE_ARRAY_CTRL:
1769 		if (strncmp(dev->pd_scsi3addr, RAID_CTLR_LUNID,
1770 		    sizeof (dev->pd_scsi3addr)) == 0)
1771 			rval = B_TRUE;
1772 		break;
1773 	default:
1774 		dev_err(s->s_dip, CE_WARN, "%s is not a supported device",
1775 		    scsi_dname(dev->pd_devtype));
1776 		break;
1777 	}
1778 	return (rval);
1779 }
1780 
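/*
 * get_phys_disk_info -- fill in the LUN and unit address for a physical
 * disk using the BMIC identify data.
 */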
1781 static void
1782 get_phys_disk_info(pqi_state_t *s __unused, pqi_device_t *dev,
1783     bmic_identify_physical_device_t *id)
1784 {
1785 	dev->pd_lun = id->scsi_lun;
1786 	(void) snprintf(dev->pd_unit_address, sizeof (dev->pd_unit_address),
1787 	    "w%016lx,%d", dev->pd_wwid, id->scsi_lun);
1788 }
1789 
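/*
 * A non-zero third byte in the SCSI3 address marks a device that sits
 * behind an external RAID controller.
 */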
1790 static int
1791 is_external_raid_addr(char *addr)
1792 {
1793 	return (addr[2] != 0);
1794 }
1795 
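/*
 * build_guid -- derive a devid GUID for a device.
 *
 * Any existing GUID is released and a new one is encoded from the standard
 * INQUIRY data plus VPD page 0x83 (device identification).
 */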
1796 static void
1797 build_guid(pqi_state_t *s, pqi_device_t *d)
1798 {
1799 	int			len	= 0xff;
1800 	struct scsi_inquiry	*inq	= NULL;
1801 	uchar_t			*inq83	= NULL;
1802 	ddi_devid_t		devid;
1803 
1804 	ddi_devid_free_guid(d->pd_guid);
1805 	d->pd_guid = NULL;
1806 
1807 	inq = kmem_alloc(sizeof (struct scsi_inquiry), KM_SLEEP);
1808 	if (pqi_scsi_inquiry(s, d, 0, inq, sizeof (struct scsi_inquiry)) ==
1809 	    B_FALSE) {
1810 		goto out;
1811 	}
1812 
1813 	inq83 = kmem_zalloc(len, KM_SLEEP);
1814 	if (pqi_scsi_inquiry(s, d, VPD_PAGE | 0x83,
1815 	    (struct scsi_inquiry *)inq83, len) == B_FALSE) {
1816 		goto out;
1817 	}
1818 
1819 	if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST, NULL,
1820 	    (uchar_t *)inq, sizeof (struct scsi_inquiry), NULL, 0, inq83,
1821 	    (size_t)len, &devid) == DDI_SUCCESS) {
1822 		d->pd_guid = ddi_devid_to_guid(devid);
1823 		ddi_devid_free(devid);
1824 	}
1825 out:
1826 	if (inq != NULL)
1827 		kmem_free(inq, sizeof (struct scsi_inquiry));
1828 	if (inq83 != NULL)
1829 		kmem_free(inq83, len);
1830 }
1831 
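/*
 * create_phys_dev -- build a pqi_device_t for a physical LUN report entry.
 *
 * Masked and unsupported devices are skipped. For disks the devid GUID,
 * BMIC identify data, and unit address are also filled in. Returns NULL if
 * the device is skipped or a request fails.
 */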
1832 static pqi_device_t *
1833 create_phys_dev(pqi_state_t *s, report_phys_lun_extended_entry_t *e)
1834 {
1835 	pqi_device_t			*dev;
1836 	bmic_identify_physical_device_t	*id_phys	= NULL;
1837 
1838 	dev = kmem_zalloc(sizeof (*dev), KM_SLEEP);
1839 	dev->pd_phys_dev = 1;
1840 	dev->pd_wwid = htonll(e->wwid);
1841 	(void) memcpy(dev->pd_scsi3addr, e->lunid, sizeof (dev->pd_scsi3addr));
1842 
1843 	/* Skip masked physical devices */
1844 	if (MASKED_DEVICE(dev->pd_scsi3addr))
1845 		goto out;
1846 
1847 	if (get_device_info(s, dev) == B_FALSE)
1848 		goto out;
1849 
1850 	if (!is_supported_dev(s, dev))
1851 		goto out;
1852 
1853 	switch (dev->pd_devtype) {
1854 	case DTYPE_ESI:
1855 		build_guid(s, dev);
1856 		/* hopefully only LUN 0... which seems to match */
1857 		(void) snprintf(dev->pd_unit_address, 20, "w%016lx,0",
1858 		    dev->pd_wwid);
1859 		break;
1860 
1861 	case DTYPE_DIRECT:
1862 	case TYPE_ZBC:
1863 		build_guid(s, dev);
1864 		id_phys = kmem_zalloc(sizeof (*id_phys), KM_SLEEP);
1865 		if ((e->device_flags &
1866 		    REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
1867 		    e->aio_handle) {
1868 
1869 			/*
1870 			 * XXX Until I figure out what's wrong with
1871 			 * using AIO I'll disable this for now.
1872 			 */
1873 			dev->pd_aio_enabled = 0;
1874 			dev->pd_aio_handle = e->aio_handle;
			if (identify_physical_device(s, dev,
			    id_phys) == B_FALSE) {
				/* Don't leak id_phys on the error path */
				kmem_free(id_phys, sizeof (*id_phys));
				goto out;
			}
		}
1879 		get_phys_disk_info(s, dev, id_phys);
1880 		kmem_free(id_phys, sizeof (*id_phys));
1881 		break;
1882 	}
1883 
1884 	return (dev);
1885 out:
1886 	kmem_free(dev, sizeof (*dev));
1887 	return (NULL);
1888 }
1889 
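/*
 * create_logical_dev -- build a pqi_device_t for a logical LUN report entry.
 *
 * The controller LUN maps to target 0, LUN 0; external RAID devices map to
 * targets 2 and up; internal logical volumes live on target 1 with the LUN
 * taken from the SCSI3 address.
 */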
1890 static pqi_device_t *
1891 create_logical_dev(pqi_state_t *s, report_log_lun_extended_entry_t *e)
1892 {
1893 	pqi_device_t	*dev;
1894 	uint16_t	target;
1895 	uint16_t	lun;
1896 
1897 	dev = kmem_zalloc(sizeof (*dev), KM_SLEEP);
1898 	dev->pd_phys_dev = 0;
1899 	(void) memcpy(dev->pd_scsi3addr, e->lunid, sizeof (dev->pd_scsi3addr));
1900 	dev->pd_external_raid = is_external_raid_addr(dev->pd_scsi3addr);
1901 
1902 	if (get_device_info(s, dev) == B_FALSE)
1903 		goto out;
1904 
1905 	if (!is_supported_dev(s, dev))
1906 		goto out;
1907 
1908 	if (memcmp(dev->pd_scsi3addr, RAID_CTLR_LUNID, 8) == 0) {
1909 		target = 0;
1910 		lun = 0;
1911 	} else if (dev->pd_external_raid) {
1912 		target = (LE_IN16(&dev->pd_scsi3addr[2]) & 0x3FFF) + 2;
1913 		lun = dev->pd_scsi3addr[0];
1914 	} else {
1915 		target = 1;
1916 		lun = LE_IN16(dev->pd_scsi3addr);
1917 	}
1918 	dev->pd_target = target;
1919 	dev->pd_lun = lun;
1920 	(void) snprintf(dev->pd_unit_address, sizeof (dev->pd_unit_address),
1921 	    "%d,%d", target, lun);
1922 
1923 	(void) memcpy(dev->pd_volume_id, e->volume_id,
1924 	    sizeof (dev->pd_volume_id));
1925 	return (dev);
1926 
1927 out:
1928 	kmem_free(dev, sizeof (*dev));
1929 	return (NULL);
1930 }
1931 
/*
 * is_new_dev -- determine whether new_dev is indeed new.
 *
 * NOTE: This function also has a side effect. If new_dev is truly new it
 * is marked as scanned; otherwise the matching device already on the list
 * is marked as scanned.
 */
1939 static boolean_t
1940 is_new_dev(pqi_state_t *s, pqi_device_t *new_dev)
1941 {
1942 	pqi_device_t	*dev;
1943 
1944 	for (dev = list_head(&s->s_devnodes); dev != NULL;
1945 	    dev = list_next(&s->s_devnodes, dev)) {
1946 		if (new_dev->pd_phys_dev != dev->pd_phys_dev) {
1947 			continue;
1948 		}
1949 		if (dev->pd_phys_dev) {
1950 			if (dev->pd_wwid == new_dev->pd_wwid) {
1951 				dev->pd_scanned = 1;
1952 				return (B_FALSE);
1953 			}
1954 		} else {
1955 			if (memcmp(dev->pd_volume_id, new_dev->pd_volume_id,
1956 			    16) == 0) {
1957 				dev->pd_scanned = 1;
1958 				return (B_FALSE);
1959 			}
1960 		}
1961 	}
1962 
1963 	new_dev->pd_scanned = 1;
1964 	return (B_TRUE);
1965 }
1966 
1967 enum pqi_reset_action {
1968 	PQI_RESET_ACTION_RESET = 0x1,
1969 	PQI_RESET_ACTION_COMPLETE = 0x2
1970 };
1971 
1972 enum pqi_reset_type {
1973 	PQI_RESET_TYPE_NO_RESET =	0x0,
1974 	PQI_RESET_TYPE_SOFT_RESET =	0x1,
1975 	PQI_RESET_TYPE_FIRM_RESET =	0x2,
1976 	PQI_RESET_TYPE_HARD_RESET =	0x3
1977 };
1978 
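/*
 * pqi_hba_reset -- issue a hard reset through the PQI device reset register,
 * poll until the controller reports the reset action complete, then wait
 * for the controller to become ready again.
 */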
1979 boolean_t
1980 pqi_hba_reset(pqi_state_t *s)
1981 {
1982 	uint32_t	val;
1983 	int		max_count = 1000;
1984 
1985 	val = (PQI_RESET_ACTION_RESET << 5) | PQI_RESET_TYPE_HARD_RESET;
1986 	S32(s, pqi_registers.device_reset, val);
1987 
1988 	while (1) {
1989 		drv_usecwait(100 * (MICROSEC / MILLISEC));
1990 		val = G32(s, pqi_registers.device_reset);
1991 		if ((val >> 5) == PQI_RESET_ACTION_COMPLETE)
1992 			break;
1993 		if (max_count-- == 0)
1994 			break;
1995 	}
1996 
1997 #ifdef DEBUG
1998 	cmn_err(CE_WARN, "pqi_hba_reset: reset reg=0x%x, count=%d", val,
1999 	    max_count);
2000 #endif
2001 	return (pqi_wait_for_mode_ready(s));
2002 }
2003 
2004 static void
2005 save_ctrl_mode(pqi_state_t *s, int mode)
2006 {
2007 	sis_write_scratch(s, mode);
2008 }
2009 
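/*
 * revert_to_sis -- reset the HBA and return the controller to SIS mode.
 */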
2010 static boolean_t
2011 revert_to_sis(pqi_state_t *s)
2012 {
2013 	if (!pqi_hba_reset(s))
2014 		return (B_FALSE);
2015 	if (sis_reenable_mode(s) == B_FALSE)
2016 		return (B_FALSE);
2017 	sis_write_scratch(s, SIS_MODE);
2018 	return (B_TRUE);
2019 }
2020 
2021 
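/* Convert a binary value in the range 0-99 to packed BCD. */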
2022 #define	BIN2BCD(x)	((((x) / 10) << 4) + (x) % 10)
2023 
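/*
 * update_time -- timeout(9F) callback that reports the host's time of day.
 *
 * The current time is packed into a BCD-encoded host wellness buffer,
 * written to the controller, and the callback reschedules itself to run
 * again in a day.
 */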
2024 static void
2025 update_time(void *v)
2026 {
2027 	pqi_state_t			*s = v;
2028 	bmic_host_wellness_time_t	*ht;
2029 	struct timeval			curtime;
2030 	todinfo_t			tod;
2031 
2032 	ht = kmem_zalloc(sizeof (*ht), KM_SLEEP);
2033 	ht->start_tag[0] = '<';
2034 	ht->start_tag[1] = 'H';
2035 	ht->start_tag[2] = 'W';
2036 	ht->start_tag[3] = '>';
2037 	ht->time_tag[0] = 'T';
2038 	ht->time_tag[1] = 'D';
2039 	ht->time_length = sizeof (ht->time);
2040 
2041 	uniqtime(&curtime);
2042 	mutex_enter(&tod_lock);
2043 	tod = utc_to_tod(curtime.tv_sec);
2044 	mutex_exit(&tod_lock);
2045 
2046 	ht->time[0] = BIN2BCD(tod.tod_hour);		/* Hour */
2047 	ht->time[1] = BIN2BCD(tod.tod_min);		/* Minute */
2048 	ht->time[2] = BIN2BCD(tod.tod_sec);		/* Second */
2049 	ht->time[3] = 0;
2050 	ht->time[4] = BIN2BCD(tod.tod_month);		/* Month */
2051 	ht->time[5] = BIN2BCD(tod.tod_day);		/* Day */
2052 	ht->time[6] = BIN2BCD(20);			/* Century */
	ht->time[7] = BIN2BCD(tod.tod_year % 100);	/* Year w/in century */
2054 
2055 	ht->dont_write_tag[0] = 'D';
2056 	ht->dont_write_tag[1] = 'W';
2057 	ht->end_tag[0] = 'Z';
2058 	ht->end_tag[1] = 'Z';
2059 
2060 	(void) write_host_wellness(s, ht, sizeof (*ht));
2061 	kmem_free(ht, sizeof (*ht));
2062 	s->s_time_of_day = timeout(update_time, s,
2063 	    DAY * drv_usectohz(MICROSEC));
2064 }
2065