1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
25  * Copyright 2019 Joyent, Inc.
26  * Copyright 2014 OmniTI Computer Consulting, Inc. All rights reserved.
27  * Copyright (c) 2014, Tegile Systems Inc. All rights reserved.
28  * Copyright 2023 Oxide Computer Company
29  * Copyright 2023 Racktop Systems, Inc.
30  */
31 
32 /*
33  * Copyright (c) 2000 to 2010, LSI Corporation.
34  * All rights reserved.
35  *
36  * Redistribution and use in source and binary forms of all code within
37  * this file that is exclusively owned by LSI, with or without
38  * modification, is permitted provided that, in addition to the CDDL 1.0
39  * License requirements, the following conditions are met:
40  *
41  *    Neither the name of the author nor the names of its contributors may be
42  *    used to endorse or promote products derived from this software without
43  *    specific prior written permission.
44  *
45  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
46  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
47  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
48  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
49  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
50  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
51  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
52  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
53  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
54  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
55  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
56  * DAMAGE.
57  */
58 
59 /*
60  * mptsas - This is a driver based on LSI Logic's MPT2.0/2.5 interface.
61  *
62  */
63 
64 #if defined(lint) || defined(DEBUG)
65 #define	MPTSAS_DEBUG
66 #endif
67 
68 /*
69  * standard header files.
70  */
71 #include <sys/note.h>
72 #include <sys/scsi/scsi.h>
73 #include <sys/pci.h>
74 #include <sys/file.h>
75 #include <sys/policy.h>
76 #include <sys/model.h>
77 #include <sys/refhash.h>
78 #include <sys/sysevent.h>
79 #include <sys/sysevent/eventdefs.h>
80 #include <sys/sysevent/dr.h>
81 #include <sys/sata/sata_defs.h>
82 #include <sys/sata/sata_hba.h>
83 #include <sys/scsi/generic/sas.h>
84 #include <sys/scsi/impl/scsi_sas.h>
85 
86 #pragma pack(1)
87 #include <sys/scsi/adapters/mpi/mpi2_type.h>
88 #include <sys/scsi/adapters/mpi/mpi2.h>
89 #include <sys/scsi/adapters/mpi/mpi2_cnfg.h>
90 #include <sys/scsi/adapters/mpi/mpi2_init.h>
91 #include <sys/scsi/adapters/mpi/mpi2_ioc.h>
92 #include <sys/scsi/adapters/mpi/mpi2_sas.h>
93 #include <sys/scsi/adapters/mpi/mpi2_tool.h>
94 #include <sys/scsi/adapters/mpi/mpi2_raid.h>
95 #pragma pack()
96 
97 /*
98  * private header files.
99  *
100  */
101 #include <sys/scsi/impl/scsi_reset_notify.h>
102 #include <sys/scsi/adapters/mpt_sas/mptsas_var.h>
103 #include <sys/scsi/adapters/mpt_sas/mptsas_ioctl.h>
104 #include <sys/scsi/adapters/mpt_sas/mptsas_smhba.h>
105 #include <sys/raidioctl.h>
106 
107 #include <sys/fs/dv_node.h>	/* devfs_clean */
108 
109 /*
110  * FMA header files
111  */
112 #include <sys/ddifm.h>
113 #include <sys/fm/protocol.h>
114 #include <sys/fm/util.h>
115 #include <sys/fm/io/ddi.h>
116 
117 /*
118  * autoconfiguration data and routines.
119  */
120 static int mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
121 static int mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
122 static int mptsas_power(dev_info_t *dip, int component, int level);
123 
124 /*
125  * cb_ops function
126  */
127 static int mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
128 	cred_t *credp, int *rval);
129 #ifdef __sparc
130 static int mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd);
131 #else  /* __sparc */
132 static int mptsas_quiesce(dev_info_t *devi);
133 #endif	/* __sparc */
134 
135 /*
136  * ddi_ufm_ops
137  */
138 static int mptsas_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg,
139     uint_t imgno, ddi_ufm_image_t *img);
140 static int mptsas_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg,
141     uint_t imgno, uint_t slotno, ddi_ufm_slot_t *slot);
142 static int mptsas_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg,
143     ddi_ufm_cap_t *caps);
144 
145 /*
146  * Resource initialization for hardware
147  */
148 static void mptsas_setup_cmd_reg(mptsas_t *mpt);
149 static void mptsas_disable_bus_master(mptsas_t *mpt);
150 static void mptsas_hba_fini(mptsas_t *mpt);
151 static void mptsas_cfg_fini(mptsas_t *mptsas_blkp);
152 static int mptsas_hba_setup(mptsas_t *mpt);
153 static void mptsas_hba_teardown(mptsas_t *mpt);
154 static int mptsas_config_space_init(mptsas_t *mpt);
155 static void mptsas_config_space_fini(mptsas_t *mpt);
156 static void mptsas_iport_register(mptsas_t *mpt);
157 static int mptsas_smp_setup(mptsas_t *mpt);
158 static void mptsas_smp_teardown(mptsas_t *mpt);
159 static int mptsas_enc_setup(mptsas_t *mpt);
160 static void mptsas_enc_teardown(mptsas_t *mpt);
161 static int mptsas_cache_create(mptsas_t *mpt);
162 static void mptsas_cache_destroy(mptsas_t *mpt);
163 static int mptsas_alloc_request_frames(mptsas_t *mpt);
164 static int mptsas_alloc_sense_bufs(mptsas_t *mpt);
165 static int mptsas_alloc_reply_frames(mptsas_t *mpt);
166 static int mptsas_alloc_free_queue(mptsas_t *mpt);
167 static int mptsas_alloc_post_queue(mptsas_t *mpt);
168 static void mptsas_alloc_reply_args(mptsas_t *mpt);
169 static int mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
170 static void mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd);
171 static int mptsas_init_chip(mptsas_t *mpt, int first_time);
172 static void mptsas_update_hashtab(mptsas_t *mpt);
173 
174 /*
175  * SCSA function prototypes
176  */
177 static int mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
178 static int mptsas_scsi_reset(struct scsi_address *ap, int level);
179 static int mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
180 static int mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly);
181 static int mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value,
182     int tgtonly);
183 static void mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
184 static struct scsi_pkt *mptsas_scsi_init_pkt(struct scsi_address *ap,
185     struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
186 	int tgtlen, int flags, int (*callback)(), caddr_t arg);
187 static void mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
188 static void mptsas_scsi_destroy_pkt(struct scsi_address *ap,
189     struct scsi_pkt *pkt);
190 static int mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
191     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
192 static void mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
193     scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
194 static int mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
195     void (*callback)(caddr_t), caddr_t arg);
196 static int mptsas_get_name(struct scsi_device *sd, char *name, int len);
197 static int mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len);
198 static int mptsas_scsi_quiesce(dev_info_t *dip);
199 static int mptsas_scsi_unquiesce(dev_info_t *dip);
200 static int mptsas_bus_config(dev_info_t *pdip, uint_t flags,
201     ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
202 
203 /*
204  * SMP functions
205  */
206 static int mptsas_smp_start(struct smp_pkt *smp_pkt);
207 
208 /*
209  * internal function prototypes.
210  */
211 static void mptsas_list_add(mptsas_t *mpt);
212 static void mptsas_list_del(mptsas_t *mpt);
213 
214 static int mptsas_quiesce_bus(mptsas_t *mpt);
215 static int mptsas_unquiesce_bus(mptsas_t *mpt);
216 
217 static int mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size);
218 static void mptsas_free_handshake_msg(mptsas_t *mpt);
219 
220 static void mptsas_ncmds_checkdrain(void *arg);
221 
222 static int mptsas_prepare_pkt(mptsas_cmd_t *cmd);
223 static int mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
224 static int mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *sp);
225 static void mptsas_accept_tx_waitq(mptsas_t *mpt);
226 
227 static int mptsas_do_detach(dev_info_t *dev);
228 static int mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl);
229 static int mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun,
230     struct scsi_pkt *pkt);
231 static int mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp);
232 
233 static void mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd);
234 static void mptsas_handle_event(void *args);
235 static int mptsas_handle_event_sync(void *args);
236 static void mptsas_handle_dr(void *args);
237 static void mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
238     dev_info_t *pdip);
239 
240 static void mptsas_restart_cmd(void *);
241 
242 static void mptsas_flush_hba(mptsas_t *mpt);
243 static void mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun,
244 	uint8_t tasktype);
245 static void mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd,
246     uchar_t reason, uint_t stat);
247 
248 static uint_t mptsas_intr(caddr_t arg1, caddr_t arg2);
249 static void mptsas_process_intr(mptsas_t *mpt,
250     pMpi2ReplyDescriptorsUnion_t reply_desc_union);
251 static void mptsas_handle_scsi_io_success(mptsas_t *mpt,
252     pMpi2ReplyDescriptorsUnion_t reply_desc);
253 static void mptsas_handle_address_reply(mptsas_t *mpt,
254     pMpi2ReplyDescriptorsUnion_t reply_desc);
255 static int mptsas_wait_intr(mptsas_t *mpt, int polltime);
256 static void mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd,
257     uint32_t *control, pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl);
258 
259 static void mptsas_watch(void *arg);
260 static void mptsas_watchsubr(mptsas_t *mpt);
261 static void mptsas_cmd_timeout(mptsas_t *mpt, mptsas_target_t *ptgt);
262 
263 static void mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd);
264 static int mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
265     uint8_t *data, uint32_t request_size, uint32_t reply_size,
266     uint32_t data_size, uint32_t direction, uint8_t *dataout,
267     uint32_t dataout_size, short timeout, int mode);
268 static int mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl);
269 
270 static uint8_t mptsas_get_fw_diag_buffer_number(mptsas_t *mpt,
271     uint32_t unique_id);
272 static void mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd);
273 static int mptsas_post_fw_diag_buffer(mptsas_t *mpt,
274     mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code);
275 static int mptsas_release_fw_diag_buffer(mptsas_t *mpt,
276     mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
277     uint32_t diag_type);
278 static int mptsas_diag_register(mptsas_t *mpt,
279     mptsas_fw_diag_register_t *diag_register, uint32_t *return_code);
280 static int mptsas_diag_unregister(mptsas_t *mpt,
281     mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code);
282 static int mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
283     uint32_t *return_code);
284 static int mptsas_diag_read_buffer(mptsas_t *mpt,
285     mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
286     uint32_t *return_code, int ioctl_mode);
287 static int mptsas_diag_release(mptsas_t *mpt,
288     mptsas_fw_diag_release_t *diag_release, uint32_t *return_code);
289 static int mptsas_do_diag_action(mptsas_t *mpt, uint32_t action,
290     uint8_t *diag_action, uint32_t length, uint32_t *return_code,
291     int ioctl_mode);
292 static int mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *data,
293     int mode);
294 
295 static int mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
296     int cmdlen, int tgtlen, int statuslen, int kf);
297 static void mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd);
298 
299 static int mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags);
300 static void mptsas_kmem_cache_destructor(void *buf, void *cdrarg);
301 
302 static int mptsas_cache_frames_constructor(void *buf, void *cdrarg,
303     int kmflags);
304 static void mptsas_cache_frames_destructor(void *buf, void *cdrarg);
305 
306 static void mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
307     mptsas_cmd_t *cmd);
308 static void mptsas_check_task_mgt(mptsas_t *mpt,
309     pMpi2SCSIManagementReply_t reply, mptsas_cmd_t *cmd);
310 static int mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
311     mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
312     int *resid);
313 
314 static int mptsas_alloc_active_slots(mptsas_t *mpt, int flag);
315 static void mptsas_free_active_slots(mptsas_t *mpt);
316 static int mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
317 
318 static void mptsas_restart_hba(mptsas_t *mpt);
319 static void mptsas_restart_waitq(mptsas_t *mpt);
320 
321 static void mptsas_deliver_doneq_thread(mptsas_t *mpt);
322 static void mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd);
323 static void mptsas_doneq_mv(mptsas_t *mpt, uint64_t t);
324 
325 static mptsas_cmd_t *mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t);
326 static void mptsas_doneq_empty(mptsas_t *mpt);
327 static void mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg);
328 
329 static mptsas_cmd_t *mptsas_waitq_rm(mptsas_t *mpt);
330 static void mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
331 static mptsas_cmd_t *mptsas_tx_waitq_rm(mptsas_t *mpt);
332 static void mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd);
333 
334 
335 static void mptsas_start_watch_reset_delay();
336 static void mptsas_setup_bus_reset_delay(mptsas_t *mpt);
337 static void mptsas_watch_reset_delay(void *arg);
338 static int mptsas_watch_reset_delay_subr(mptsas_t *mpt);
339 
340 /*
341  * helper functions
342  */
343 static void mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd);
344 
345 static dev_info_t *mptsas_find_child(dev_info_t *pdip, char *name);
346 static dev_info_t *mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy);
347 static dev_info_t *mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr,
348     int lun);
349 static mdi_pathinfo_t *mptsas_find_path_addr(dev_info_t *pdip, uint64_t sasaddr,
350     int lun);
351 static mdi_pathinfo_t *mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy);
352 static dev_info_t *mptsas_find_smp_child(dev_info_t *pdip, char *str_wwn);
353 
354 static int mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy,
355     int *lun);
356 static int mptsas_parse_smp_name(char *name, uint64_t *wwn);
357 
358 static mptsas_target_t *mptsas_phy_to_tgt(mptsas_t *mpt,
359     mptsas_phymask_t phymask, uint8_t phy);
360 static mptsas_target_t *mptsas_wwid_to_ptgt(mptsas_t *mpt,
361     mptsas_phymask_t phymask, uint64_t wwid);
362 static mptsas_smp_t *mptsas_wwid_to_psmp(mptsas_t *mpt,
363     mptsas_phymask_t phymask, uint64_t wwid);
364 
365 static int mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun,
366     uchar_t page, unsigned char *buf, int len, int *rlen, uchar_t evpd);
367 
368 static int mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
369     uint16_t *handle, mptsas_target_t **pptgt);
370 static void mptsas_update_phymask(mptsas_t *mpt);
371 
372 static int mptsas_flush_led_status(mptsas_t *mpt, mptsas_enclosure_t *mep,
373     uint16_t idx);
374 static int mptsas_send_sep(mptsas_t *mpt, mptsas_enclosure_t *mep, uint16_t idx,
375     uint32_t *status, uint8_t cmd);
376 static dev_info_t *mptsas_get_dip_from_dev(dev_t dev,
377     mptsas_phymask_t *phymask);
378 static mptsas_target_t *mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr,
379     mptsas_phymask_t phymask);
380 
381 
382 /*
383  * Enumeration / DR functions
384  */
385 static void mptsas_config_all(dev_info_t *pdip);
386 static int mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
387     dev_info_t **lundip);
388 static int mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
389     dev_info_t **lundip);
390 
391 static int mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt);
392 static int mptsas_offline_target(dev_info_t *pdip, char *name);
393 
394 static int mptsas_config_raid(dev_info_t *pdip, uint16_t target,
395     dev_info_t **dip);
396 
397 static int mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt);
398 static int mptsas_probe_lun(dev_info_t *pdip, int lun,
399     dev_info_t **dip, mptsas_target_t *ptgt);
400 
401 static int mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
402     dev_info_t **dip, mptsas_target_t *ptgt, int lun);
403 
404 static int mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
405     char *guid, dev_info_t **dip, mptsas_target_t *ptgt, int lun);
406 static int mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *sd,
407     char *guid, dev_info_t **dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt,
408     int lun);
409 
410 static void mptsas_offline_missed_luns(dev_info_t *pdip,
411     uint16_t *repluns, int lun_cnt, mptsas_target_t *ptgt);
412 static int mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
413     mdi_pathinfo_t *rpip, uint_t flags);
414 
415 static int mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn,
416     dev_info_t **smp_dip);
417 static int mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
418     uint_t flags);
419 
420 static int mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data,
421     int mode, int *rval);
422 static int mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data,
423     int mode, int *rval);
424 static int mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data,
425     int mode, int *rval);
426 static void mptsas_record_event(void *args);
427 static int mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data,
428     int mode);
429 
430 mptsas_target_t *mptsas_tgt_alloc(refhash_t *, uint16_t, uint64_t,
431     uint32_t, mptsas_phymask_t, uint8_t);
432 static mptsas_smp_t *mptsas_smp_alloc(mptsas_t *, mptsas_smp_t *);
433 static int mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
434     dev_info_t **smp_dip);
435 
436 /*
437  * Power management functions
438  */
439 static int mptsas_get_pci_cap(mptsas_t *mpt);
440 static int mptsas_init_pm(mptsas_t *mpt);
441 
442 /*
443  * MPT MSI tunable:
444  *
445  * By default MSI is enabled on all supported platforms.
446  */
447 boolean_t mptsas_enable_msi = B_TRUE;
448 boolean_t mptsas_physical_bind_failed_page_83 = B_FALSE;
449 
450 /*
451  * Global switch for use of MPI2.5 FAST PATH.
452  * We don't really know what FAST PATH actually does, so if it is suspected
453  * to cause problems it can be turned off by setting this variable to B_FALSE.
454  */
455 boolean_t mptsas_use_fastpath = B_TRUE;
456 
457 static int mptsas_register_intrs(mptsas_t *);
458 static void mptsas_unregister_intrs(mptsas_t *);
459 static int mptsas_add_intrs(mptsas_t *, int);
460 static void mptsas_rem_intrs(mptsas_t *);
461 
462 /*
463  * FMA Prototypes
464  */
465 static void mptsas_fm_init(mptsas_t *mpt);
466 static void mptsas_fm_fini(mptsas_t *mpt);
467 static int mptsas_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
468 
469 extern pri_t minclsyspri, maxclsyspri;
470 
471 /*
472  * This device is created by the SCSI pseudo nexus driver (SCSI vHCI).  It is
473  * under this device that the paths to a physical device are created when
474  * MPxIO is used.
475  */
476 extern dev_info_t	*scsi_vhci_dip;
477 
478 /*
479  * Tunable timeout value for Inquiry VPD page 0x83
480  * By default the value is 30 seconds.
481  */
482 int mptsas_inq83_retry_timeout = 30;
483 
484 /*
485  * This is used to allocate memory for message frame storage, not for
486  * data I/O DMA. All message frames must be stored in the first 4G of
487  * physical memory.
488  */
489 ddi_dma_attr_t mptsas_dma_attrs = {
490 	DMA_ATTR_V0,	/* attribute layout version		*/
491 	0x0ull,		/* address low - should be 0 (longlong)	*/
492 	0xffffffffull,	/* address high - 32-bit max range	*/
493 	0x00ffffffull,	/* count max - max DMA object size	*/
494 	4,		/* allocation alignment requirements	*/
495 	0x78,		/* burstsizes - binary encoded values	*/
496 	1,		/* minxfer - gran. of DMA engine	*/
497 	0x00ffffffull,	/* maxxfer - gran. of DMA engine	*/
498 	0xffffffffull,	/* max segment size (DMA boundary)	*/
499 	MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length	*/
500 	512,		/* granularity - device transfer size	*/
501 	0		/* flags, set to 0			*/
502 };
503 
504 /*
505  * This is used for data I/O DMA memory allocation. (full 64-bit DMA
506  * physical addresses are supported.)
507  */
508 ddi_dma_attr_t mptsas_dma_attrs64 = {
509 	DMA_ATTR_V0,	/* attribute layout version		*/
510 	0x0ull,		/* address low - should be 0 (longlong)	*/
511 	0xffffffffffffffffull,	/* address high - 64-bit max	*/
512 	0x00ffffffull,	/* count max - max DMA object size	*/
513 	4,		/* allocation alignment requirements	*/
514 	0x78,		/* burstsizes - binary encoded values	*/
515 	1,		/* minxfer - gran. of DMA engine	*/
516 	0x00ffffffull,	/* maxxfer - gran. of DMA engine	*/
517 	0xffffffffull,	/* max segment size (DMA boundary)	*/
518 	MPTSAS_MAX_DMA_SEGS, /* scatter/gather list length	*/
519 	512,		/* granularity - device transfer size	*/
520 	0		/* flags, set to 0 */
521 };
522 
523 ddi_device_acc_attr_t mptsas_dev_attr = {
524 	DDI_DEVICE_ATTR_V1,
525 	DDI_STRUCTURE_LE_ACC,
526 	DDI_STRICTORDER_ACC,
527 	DDI_DEFAULT_ACC
528 };
529 
530 static struct cb_ops mptsas_cb_ops = {
531 	scsi_hba_open,		/* open */
532 	scsi_hba_close,		/* close */
533 	nodev,			/* strategy */
534 	nodev,			/* print */
535 	nodev,			/* dump */
536 	nodev,			/* read */
537 	nodev,			/* write */
538 	mptsas_ioctl,		/* ioctl */
539 	nodev,			/* devmap */
540 	nodev,			/* mmap */
541 	nodev,			/* segmap */
542 	nochpoll,		/* chpoll */
543 	ddi_prop_op,		/* cb_prop_op */
544 	NULL,			/* streamtab */
545 	D_MP,			/* cb_flag */
546 	CB_REV,			/* rev */
547 	nodev,			/* aread */
548 	nodev			/* awrite */
549 };
550 
551 static struct dev_ops mptsas_ops = {
552 	DEVO_REV,		/* devo_rev, */
553 	0,			/* refcnt  */
554 	ddi_no_info,		/* info */
555 	nulldev,		/* identify */
556 	nulldev,		/* probe */
557 	mptsas_attach,		/* attach */
558 	mptsas_detach,		/* detach */
559 #ifdef  __sparc
560 	mptsas_reset,
561 #else
562 	nodev,			/* reset */
563 #endif  /* __sparc */
564 	&mptsas_cb_ops,		/* driver operations */
565 	NULL,			/* bus operations */
566 	mptsas_power,		/* power management */
567 #ifdef	__sparc
568 	ddi_quiesce_not_needed
569 #else
570 	mptsas_quiesce		/* quiesce */
571 #endif	/* __sparc */
572 };
573 
574 static ddi_ufm_ops_t mptsas_ufm_ops = {
575 	NULL,
576 	mptsas_ufm_fill_image,
577 	mptsas_ufm_fill_slot,
578 	mptsas_ufm_getcaps
579 };
580 
581 #define	MPTSAS_MOD_STRING "MPTSAS HBA Driver 00.00.00.24"
582 
583 static struct modldrv modldrv = {
584 	&mod_driverops,	/* Type of module. This one is a driver */
585 	MPTSAS_MOD_STRING, /* Name of the module. */
586 	&mptsas_ops,	/* driver ops */
587 };
588 
589 static struct modlinkage modlinkage = {
590 	MODREV_1, &modldrv, NULL
591 };
592 #define	TARGET_PROP	"target"
593 #define	LUN_PROP	"lun"
594 #define	LUN64_PROP	"lun64"
595 #define	SAS_PROP	"sas-mpt"
596 #define	MDI_GUID	"wwn"
597 #define	NDI_GUID	"guid"
598 #define	MPTSAS_DEV_GONE	"mptsas_dev_gone"
599 
600 /*
601  * Local static data
602  */
603 #if defined(MPTSAS_DEBUG)
604 /*
605  * Flags to indicate which debug messages are to be printed and which go to the
606  * debug log ring buffer. Default is to not print anything, and to log
607  * everything except the watchsubr() output which normally happens every second.
608  */
609 uint32_t mptsas_debugprt_flags = 0x0;
610 uint32_t mptsas_debuglog_flags = ~(1U << 30);
611 #endif	/* defined(MPTSAS_DEBUG) */
612 uint32_t mptsas_debug_resets = 0;
613 
614 static kmutex_t		mptsas_global_mutex;
615 static void		*mptsas_state;		/* soft	state ptr */
616 static krwlock_t	mptsas_global_rwlock;
617 
618 static kmutex_t		mptsas_log_mutex;
619 static char		mptsas_log_buf[256];
620 _NOTE(MUTEX_PROTECTS_DATA(mptsas_log_mutex, mptsas_log_buf))
621 
622 static mptsas_t *mptsas_head, *mptsas_tail;
623 static clock_t mptsas_scsi_watchdog_tick;
624 static clock_t mptsas_tick;
625 static timeout_id_t mptsas_reset_watch;
626 static timeout_id_t mptsas_timeout_id;
627 static int mptsas_timeouts_enabled = 0;
628 
629 /*
630  * Default length for extended auto request sense buffers.
631  * All sense buffers need to live within the same allocation because there
632  * is only one common top 32 bits (of 64 bits) address register.
633  * Most requests only require 32 bytes, but some require more than 256.
634  * We use rmalloc()/rmfree() on this additional memory to manage the
635  * "extended" requests.
636  */
637 int mptsas_extreq_sense_bufsize = 256*64;
638 
639 /*
640  * We believe that all software restrictions requiring DMA attributes that
641  * limit allocation to the first 4G have been removed.
642  * However, this flag remains to enable quick switchback should suspicious
643  * problems emerge.
644  * Note that scsi_alloc_consistent_buf() does still adhere to allocating
645  * 32 bit addressable memory, but we can cope if that is changed now.
646  */
647 int mptsas_use_64bit_msgaddr = 1;
648 
649 /*
650  * warlock directives
651  */
652 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt \
653 	mptsas_cmd NcrTableIndirect buf scsi_cdb scsi_status))
654 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", smp_pkt))
655 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
656 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", mptsas_tgt_private))
657 _NOTE(SCHEME_PROTECTS_DATA("No Mutex Needed", scsi_hba_tran::tran_tgt_private))
658 
659 /*
660  * SM - HBA statics
661  */
662 char	*mptsas_driver_rev = MPTSAS_MOD_STRING;
663 
664 #ifdef MPTSAS_DEBUG
665 void debug_enter(char *);
666 #endif
667 
668 /*
669  * Notes:
670  *	- scsi_hba_init(9F) initializes SCSI HBA modules
671  *	- must call scsi_hba_fini(9F) if modload() fails
672  */
673 int
674 _init(void)
675 {
676 	int status;
677 	/* CONSTCOND */
678 	ASSERT(NO_COMPETING_THREADS);
679 
680 	NDBG0(("_init"));
681 
682 	status = ddi_soft_state_init(&mptsas_state, MPTSAS_SIZE,
683 	    MPTSAS_INITIAL_SOFT_SPACE);
684 	if (status != 0) {
685 		return (status);
686 	}
687 
688 	if ((status = scsi_hba_init(&modlinkage)) != 0) {
689 		ddi_soft_state_fini(&mptsas_state);
690 		return (status);
691 	}
692 
693 	mutex_init(&mptsas_global_mutex, NULL, MUTEX_DRIVER, NULL);
694 	rw_init(&mptsas_global_rwlock, NULL, RW_DRIVER, NULL);
695 	mutex_init(&mptsas_log_mutex, NULL, MUTEX_DRIVER, NULL);
696 
697 	if ((status = mod_install(&modlinkage)) != 0) {
698 		mutex_destroy(&mptsas_log_mutex);
699 		rw_destroy(&mptsas_global_rwlock);
700 		mutex_destroy(&mptsas_global_mutex);
701 		ddi_soft_state_fini(&mptsas_state);
702 		scsi_hba_fini(&modlinkage);
703 	}
704 
705 	return (status);
706 }
707 
708 /*
709  * Notes:
710  *	- scsi_hba_fini(9F) uninitializes SCSI HBA modules
711  */
712 int
713 _fini(void)
714 {
715 	int	status;
716 	/* CONSTCOND */
717 	ASSERT(NO_COMPETING_THREADS);
718 
719 	NDBG0(("_fini"));
720 
721 	if ((status = mod_remove(&modlinkage)) == 0) {
722 		ddi_soft_state_fini(&mptsas_state);
723 		scsi_hba_fini(&modlinkage);
724 		mutex_destroy(&mptsas_global_mutex);
725 		rw_destroy(&mptsas_global_rwlock);
726 		mutex_destroy(&mptsas_log_mutex);
727 	}
728 	return (status);
729 }
730 
731 /*
732  * The loadable-module _info(9E) entry point
733  */
734 int
735 _info(struct modinfo *modinfop)
736 {
737 	/* CONSTCOND */
738 	ASSERT(NO_COMPETING_THREADS);
739 	NDBG0(("mptsas _info"));
740 
741 	return (mod_info(&modlinkage, modinfop));
742 }
743 
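/*
 * refhash evaluation callbacks: each returns zero when the entry matches the
 * supplied key (a device handle or PHY number) and non-zero otherwise.
 */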
744 static int
745 mptsas_target_eval_devhdl(const void *op, void *arg)
746 {
747 	uint16_t dh = *(uint16_t *)arg;
748 	const mptsas_target_t *tp = op;
749 
750 	return ((int)tp->m_devhdl - (int)dh);
751 }
752 
753 static int
754 mptsas_target_eval_nowwn(const void *op, void *arg)
755 {
756 	uint8_t phy = *(uint8_t *)arg;
757 	const mptsas_target_t *tp = op;
758 
759 	if (tp->m_addr.mta_wwn != 0)
760 		return (-1);
761 
762 	return ((int)tp->m_phynum - (int)phy);
763 }
764 
765 static int
766 mptsas_smp_eval_devhdl(const void *op, void *arg)
767 {
768 	uint16_t dh = *(uint16_t *)arg;
769 	const mptsas_smp_t *sp = op;
770 
771 	return ((int)sp->m_devhdl - (int)dh);
772 }
773 
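/*
 * Hash a target address by packing the 16-bit phymask above the low 48 bits
 * of the SAS WWN, producing a single 64-bit key for the target refhash.
 */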
774 static uint64_t
775 mptsas_target_addr_hash(const void *tp)
776 {
777 	const mptsas_target_addr_t *tap = tp;
778 
779 	return ((tap->mta_wwn & 0xffffffffffffULL) |
780 	    ((uint64_t)tap->mta_phymask << 48));
781 }
782 
783 static int
784 mptsas_target_addr_cmp(const void *a, const void *b)
785 {
786 	const mptsas_target_addr_t *aap = a;
787 	const mptsas_target_addr_t *bap = b;
788 
789 	if (aap->mta_wwn < bap->mta_wwn)
790 		return (-1);
791 	if (aap->mta_wwn > bap->mta_wwn)
792 		return (1);
793 	return ((int)bap->mta_phymask - (int)aap->mta_phymask);
794 }
795 
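/*
 * Temporary targets have no stable SAS address yet, so they are hashed and
 * compared by the address of the target structure itself.
 */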
796 static uint64_t
797 mptsas_tmp_target_hash(const void *tp)
798 {
799 	return ((uint64_t)(uintptr_t)tp);
800 }
801 
802 static int
803 mptsas_tmp_target_cmp(const void *a, const void *b)
804 {
805 	if (a > b)
806 		return (1);
807 	if (a < b)
808 		return (-1);
809 
810 	return (0);
811 }
812 
813 static void
814 mptsas_target_free(void *op)
815 {
816 	kmem_free(op, sizeof (mptsas_target_t));
817 }
818 
819 static void
820 mptsas_smp_free(void *op)
821 {
822 	kmem_free(op, sizeof (mptsas_smp_t));
823 }
824 
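/*
 * Remove and release every remaining target and SMP entry, then destroy the
 * refhash tables themselves.
 */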
825 static void
826 mptsas_destroy_hashes(mptsas_t *mpt)
827 {
828 	mptsas_target_t *tp;
829 	mptsas_smp_t *sp;
830 
831 	for (tp = refhash_first(mpt->m_targets); tp != NULL;
832 	    tp = refhash_next(mpt->m_targets, tp)) {
833 		refhash_remove(mpt->m_targets, tp);
834 	}
835 	for (sp = refhash_first(mpt->m_smp_targets); sp != NULL;
836 	    sp = refhash_next(mpt->m_smp_targets, sp)) {
837 		refhash_remove(mpt->m_smp_targets, sp);
838 	}
839 	refhash_destroy(mpt->m_tmp_targets);
840 	refhash_destroy(mpt->m_targets);
841 	refhash_destroy(mpt->m_smp_targets);
842 	mpt->m_targets = NULL;
843 	mpt->m_smp_targets = NULL;
844 }
845 
846 static int
847 mptsas_iport_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
848 {
849 	dev_info_t		*pdip;
850 	mptsas_t		*mpt;
851 	scsi_hba_tran_t		*hba_tran;
852 	char			*iport = NULL;
853 	char			phymask[MPTSAS_MAX_PHYS];
854 	mptsas_phymask_t	phy_mask = 0;
855 	int			dynamic_port = 0;
856 	uint32_t		page_address;
857 	char			initiator_wwnstr[MPTSAS_WWN_STRLEN];
858 	int			rval = DDI_FAILURE;
859 	int			i = 0;
860 	uint8_t			numphys = 0;
861 	uint8_t			phy_id;
862 	uint8_t			phy_port = 0;
863 	uint16_t		attached_devhdl = 0;
864 	uint32_t		dev_info;
865 	uint64_t		attached_sas_wwn;
866 	uint16_t		dev_hdl;
867 	uint16_t		pdev_hdl;
868 	uint16_t		bay_num, enclosure, io_flags;
869 	char			attached_wwnstr[MPTSAS_WWN_STRLEN];
870 
871 	/* CONSTCOND */
872 	ASSERT(NO_COMPETING_THREADS);
873 
874 	switch (cmd) {
875 	case DDI_ATTACH:
876 		break;
877 
878 	case DDI_RESUME:
879 		/*
880 		 * If this is a scsi-iport node, nothing to do here.
881 		 */
882 		return (DDI_SUCCESS);
883 
884 	default:
885 		return (DDI_FAILURE);
886 	}
887 
888 	pdip = ddi_get_parent(dip);
889 
890 	if ((hba_tran = ndi_flavorv_get(pdip, SCSA_FLAVOR_SCSI_DEVICE)) ==
891 	    NULL) {
892 		cmn_err(CE_WARN, "Failed to attach iport: could not "
893 		    "get tran vector for the HBA node");
894 		return (DDI_FAILURE);
895 	}
896 
897 	mpt = TRAN2MPT(hba_tran);
898 	ASSERT(mpt != NULL);
899 	if (mpt == NULL)
900 		return (DDI_FAILURE);
901 
902 	if ((hba_tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) ==
903 	    NULL) {
904 		mptsas_log(mpt, CE_WARN, "Failed to attach iport: could not "
905 		    "get tran vector for the iport node");
906 		return (DDI_FAILURE);
907 	}
908 
909 	/*
910 	 * Point the iport's tran_hba_private at the parent HBA's soft state.
911 	 */
912 	hba_tran->tran_hba_private = mpt;
913 
914 	ddi_report_dev(dip);
915 
916 	/*
917 	 * Get SAS address for the initiator port according to its dev_handle
918 	 */
919 	iport = ddi_get_name_addr(dip);
920 	if (iport && strncmp(iport, "v0", 2) == 0) {
921 		if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
922 		    MPTSAS_VIRTUAL_PORT, 1) !=
923 		    DDI_PROP_SUCCESS) {
924 			(void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
925 			    MPTSAS_VIRTUAL_PORT);
926 			mptsas_log(mpt, CE_WARN, "mptsas virtual port "
927 			    "prop update failed");
928 			return (DDI_FAILURE);
929 		}
930 		return (DDI_SUCCESS);
931 	}
932 
933 	mutex_enter(&mpt->m_mutex);
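	/*
	 * Find the PHY mask whose hexadecimal string form matches this
	 * iport's unit address.
	 */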
934 	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
935 		bzero(phymask, sizeof (phymask));
936 		(void) sprintf(phymask,
937 		    "%x", mpt->m_phy_info[i].phy_mask);
938 		if (strcmp(phymask, iport) == 0) {
939 			break;
940 		}
941 	}
942 
943 	if (i == MPTSAS_MAX_PHYS) {
944 		mptsas_log(mpt, CE_WARN, "Failed to attach port %s: port "
945 		    "does not seem to exist", iport);
946 		mutex_exit(&mpt->m_mutex);
947 		return (DDI_FAILURE);
948 	}
949 
950 	phy_mask = mpt->m_phy_info[i].phy_mask;
951 
952 	if (mpt->m_phy_info[i].port_flags & AUTO_PORT_CONFIGURATION)
953 		dynamic_port = 1;
954 	else
955 		dynamic_port = 0;
956 
957 	/*
958 	 * Update PHY info for smhba
959 	 */
960 	if (mptsas_smhba_phy_init(mpt)) {
961 		mutex_exit(&mpt->m_mutex);
962 		mptsas_log(mpt, CE_WARN, "mptsas phy update "
963 		    "failed");
964 		return (DDI_FAILURE);
965 	}
966 
967 	mutex_exit(&mpt->m_mutex);
968 
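	/* Count the PHYs included in this iport's PHY mask. */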
969 	numphys = 0;
970 	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
971 		if ((phy_mask >> i) & 0x01) {
972 			numphys++;
973 		}
974 	}
975 
976 	bzero(initiator_wwnstr, sizeof (initiator_wwnstr));
977 	(void) sprintf(initiator_wwnstr, "w%016"PRIx64,
978 	    mpt->un.m_base_wwid);
979 
980 	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
981 	    SCSI_ADDR_PROP_INITIATOR_PORT, initiator_wwnstr) !=
982 	    DDI_PROP_SUCCESS) {
983 		(void) ddi_prop_remove(DDI_DEV_T_NONE,
984 		    dip, SCSI_ADDR_PROP_INITIATOR_PORT);
985 		mptsas_log(mpt, CE_WARN, "mptsas Initiator port "
986 		    "prop update failed");
987 		return (DDI_FAILURE);
988 	}
989 	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
990 	    MPTSAS_NUM_PHYS, numphys) !=
991 	    DDI_PROP_SUCCESS) {
992 		(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, MPTSAS_NUM_PHYS);
993 		return (DDI_FAILURE);
994 	}
995 
996 	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
997 	    "phymask", phy_mask) !=
998 	    DDI_PROP_SUCCESS) {
999 		(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "phymask");
1000 		mptsas_log(mpt, CE_WARN, "mptsas phy mask "
1001 		    "prop update failed");
1002 		return (DDI_FAILURE);
1003 	}
1004 
1005 	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
1006 	    "dynamic-port", dynamic_port) !=
1007 	    DDI_PROP_SUCCESS) {
1008 		(void) ddi_prop_remove(DDI_DEV_T_NONE, dip, "dynamic-port");
1009 		mptsas_log(mpt, CE_WARN, "mptsas dynamic port "
1010 		    "prop update failed");
1011 		return (DDI_FAILURE);
1012 	}
1013 	if (ddi_prop_update_int(DDI_DEV_T_NONE, dip,
1014 	    MPTSAS_VIRTUAL_PORT, 0) !=
1015 	    DDI_PROP_SUCCESS) {
1016 		(void) ddi_prop_remove(DDI_DEV_T_NONE, dip,
1017 		    MPTSAS_VIRTUAL_PORT);
1018 		mptsas_log(mpt, CE_WARN, "mptsas virtual port "
1019 		    "prop update failed");
1020 		return (DDI_FAILURE);
1021 	}
1022 	mptsas_smhba_set_all_phy_props(mpt, dip, numphys, phy_mask,
1023 	    &attached_devhdl);
1024 
1025 	mutex_enter(&mpt->m_mutex);
1026 	page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
1027 	    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)attached_devhdl;
1028 	rval = mptsas_get_sas_device_page0(mpt, page_address, &dev_hdl,
1029 	    &attached_sas_wwn, &dev_info, &phy_port, &phy_id,
1030 	    &pdev_hdl, &bay_num, &enclosure, &io_flags);
1031 	if (rval != DDI_SUCCESS) {
1032 		mptsas_log(mpt, CE_WARN,
1033 		    "Failed to get device page0 for handle:%d",
1034 		    attached_devhdl);
1035 		mutex_exit(&mpt->m_mutex);
1036 		return (DDI_FAILURE);
1037 	}
1038 
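	/*
	 * Record the phymask string in the per-PHY smhba_info path for each
	 * PHY that belongs to this iport.
	 */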
1039 	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1040 		bzero(phymask, sizeof (phymask));
1041 		(void) sprintf(phymask, "%x", mpt->m_phy_info[i].phy_mask);
1042 		if (strcmp(phymask, iport) == 0) {
1043 			(void) sprintf(&mpt->m_phy_info[i].smhba_info.path[0],
1044 			    "%x",
1045 			    mpt->m_phy_info[i].phy_mask);
1046 		}
1047 	}
1048 	mutex_exit(&mpt->m_mutex);
1049 
1050 	bzero(attached_wwnstr, sizeof (attached_wwnstr));
1051 	(void) sprintf(attached_wwnstr, "w%016"PRIx64,
1052 	    attached_sas_wwn);
1053 	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip,
1054 	    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
1055 	    DDI_PROP_SUCCESS) {
1056 		(void) ddi_prop_remove(DDI_DEV_T_NONE,
1057 		    dip, SCSI_ADDR_PROP_ATTACHED_PORT);
1058 		return (DDI_FAILURE);
1059 	}
1060 
1061 	/* Create kstats for each phy on this iport */
1062 
1063 	mptsas_create_phy_stats(mpt, iport, dip);
1064 
1065 	/*
1066 	 * register sas hba iport with mdi (MPxIO/vhci)
1067 	 */
1068 	if (mdi_phci_register(MDI_HCI_CLASS_SCSI,
1069 	    dip, 0) == MDI_SUCCESS) {
1070 		mpt->m_mpxio_enable = TRUE;
1071 	}
1072 	return (DDI_SUCCESS);
1073 }
1074 
1075 /*
1076  * Notes:
1077  *	Set up all device state and allocate data structures,
1078  *	mutexes, condition variables, etc. for device operation.
1079  *	Add interrupts needed.
1080  *	Return DDI_SUCCESS if device is ready, else return DDI_FAILURE.
1081  */
1082 static int
1083 mptsas_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1084 {
1085 	mptsas_t		*mpt = NULL;
1086 	int			instance, i, j;
1087 	int			doneq_thread_num;
1088 	char			intr_added = 0;
1089 	char			map_setup = 0;
1090 	char			config_setup = 0;
1091 	char			hba_attach_setup = 0;
1092 	char			smp_attach_setup = 0;
1093 	char			enc_attach_setup = 0;
1094 	char			mutex_init_done = 0;
1095 	char			event_taskq_create = 0;
1096 	char			dr_taskq_create = 0;
1097 	char			doneq_thread_create = 0;
1098 	char			added_watchdog = 0;
1099 	scsi_hba_tran_t		*hba_tran;
1100 	uint_t			mem_bar;
1101 	int			rval = DDI_FAILURE;
1102 
1103 	/* CONSTCOND */
1104 	ASSERT(NO_COMPETING_THREADS);
1105 
1106 	if (scsi_hba_iport_unit_address(dip)) {
1107 		return (mptsas_iport_attach(dip, cmd));
1108 	}
1109 
1110 	switch (cmd) {
1111 	case DDI_ATTACH:
1112 		break;
1113 
1114 	case DDI_RESUME:
1115 		if ((hba_tran = ddi_get_driver_private(dip)) == NULL)
1116 			return (DDI_FAILURE);
1117 
1118 		mpt = TRAN2MPT(hba_tran);
1119 
1120 		if (!mpt) {
1121 			return (DDI_FAILURE);
1122 		}
1123 
1124 		/*
1125 		 * Reset hardware and softc to "no outstanding commands"
1126 		 * Note	that a check condition can result on first command
1127 		 * to a	target.
1128 		 */
1129 		mutex_enter(&mpt->m_mutex);
1130 
1131 		/*
1132 		 * raise power.
1133 		 */
1134 		if (mpt->m_options & MPTSAS_OPT_PM) {
1135 			mutex_exit(&mpt->m_mutex);
1136 			(void) pm_busy_component(dip, 0);
1137 			rval = pm_power_has_changed(dip, 0, PM_LEVEL_D0);
1138 			if (rval == DDI_SUCCESS) {
1139 				mutex_enter(&mpt->m_mutex);
1140 			} else {
1141 				/*
1142 				 * The pm_power_has_changed() call above
1143 				 * failed, and that can only occur if we were
1144 				 * unable to reset the hardware.  This is
1145 				 * probably due to unhealthy hardware, and
1146 				 * because important filesystems (such as the
1147 				 * root filesystem) could be on the attached
1148 				 * disks, it would not be a good idea to
1149 				 * continue, as we won't be entirely certain we
1150 				 * are writing correct data.  So we panic() here
1151 				 * not only to prevent possible data corruption,
1152 				 * but to give developers or end users a hope
1153 				 * of identifying and correcting any problems.
1154 				 */
1155 				fm_panic("mptsas could not reset hardware "
1156 				    "during resume");
1157 			}
1158 		}
1159 
1160 		mpt->m_suspended = 0;
1161 
1162 		/*
1163 		 * Reinitialize ioc
1164 		 */
1165 		mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1166 		if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
1167 			mutex_exit(&mpt->m_mutex);
1168 			if (mpt->m_options & MPTSAS_OPT_PM) {
1169 				(void) pm_idle_component(dip, 0);
1170 			}
1171 			fm_panic("mptsas init chip fail during resume");
1172 		}
1173 		/*
1174 		 * mptsas_update_driver_data needs interrupts so enable them
1175 		 * first.
1176 		 */
1177 		MPTSAS_ENABLE_INTR(mpt);
1178 		mptsas_update_driver_data(mpt);
1179 
1180 		/* start requests, if possible */
1181 		mptsas_restart_hba(mpt);
1182 
1183 		mutex_exit(&mpt->m_mutex);
1184 
1185 		/*
1186 		 * Restart watch thread
1187 		 */
1188 		mutex_enter(&mptsas_global_mutex);
1189 		if (mptsas_timeout_id == 0) {
1190 			mptsas_timeout_id = timeout(mptsas_watch, NULL,
1191 			    mptsas_tick);
1192 			mptsas_timeouts_enabled = 1;
1193 		}
1194 		mutex_exit(&mptsas_global_mutex);
1195 
1196 		/* report idle status to pm framework */
1197 		if (mpt->m_options & MPTSAS_OPT_PM) {
1198 			(void) pm_idle_component(dip, 0);
1199 		}
1200 
1201 		return (DDI_SUCCESS);
1202 
1203 	default:
1204 		return (DDI_FAILURE);
1205 
1206 	}
1207 
1208 	instance = ddi_get_instance(dip);
1209 
1210 	/*
1211 	 * Allocate softc information.
1212 	 */
1213 	if (ddi_soft_state_zalloc(mptsas_state, instance) != DDI_SUCCESS) {
1214 		mptsas_log(NULL, CE_WARN,
1215 		    "mptsas%d: cannot allocate soft state", instance);
1216 		goto fail;
1217 	}
1218 
1219 	mpt = ddi_get_soft_state(mptsas_state, instance);
1220 
1221 	if (mpt == NULL) {
1222 		mptsas_log(NULL, CE_WARN,
1223 		    "mptsas%d: cannot get soft state", instance);
1224 		goto fail;
1225 	}
1226 
1227 	/* Indicate that we are 'sizeof (scsi_*(9S))' clean. */
1228 	scsi_size_clean(dip);
1229 
1230 	mpt->m_dip = dip;
1231 	mpt->m_instance = instance;
1232 
1233 	/* Make a per-instance copy of the structures */
1234 	mpt->m_io_dma_attr = mptsas_dma_attrs64;
1235 	if (mptsas_use_64bit_msgaddr) {
1236 		mpt->m_msg_dma_attr = mptsas_dma_attrs64;
1237 	} else {
1238 		mpt->m_msg_dma_attr = mptsas_dma_attrs;
1239 	}
1240 	mpt->m_reg_acc_attr = mptsas_dev_attr;
1241 	mpt->m_dev_acc_attr = mptsas_dev_attr;
1242 
1243 	/*
1244 	 * Size of individual request sense buffer
1245 	 */
1246 	mpt->m_req_sense_size = EXTCMDS_STATUS_SIZE;
1247 
1248 	/*
1249 	 * Initialize FMA
1250 	 */
1251 	mpt->m_fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, mpt->m_dip,
1252 	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
1253 	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
1254 	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
1255 
1256 	mptsas_fm_init(mpt);
1257 
1258 	/*
1259 	 * Initialize us with the UFM subsystem
1260 	 */
1261 	if (ddi_ufm_init(dip, DDI_UFM_CURRENT_VERSION, &mptsas_ufm_ops,
1262 	    &mpt->m_ufmh, mpt) != 0) {
1263 		mptsas_log(mpt, CE_WARN, "failed to initialize UFM subsystem");
1264 		goto fail;
1265 	}
1266 
1267 	if (mptsas_alloc_handshake_msg(mpt,
1268 	    sizeof (Mpi2SCSITaskManagementRequest_t)) == DDI_FAILURE) {
1269 		mptsas_log(mpt, CE_WARN, "cannot initialize handshake msg.");
1270 		goto fail;
1271 	}
1272 
1273 	/*
1274 	 * Setup configuration space
1275 	 */
1276 	if (mptsas_config_space_init(mpt) == FALSE) {
1277 		mptsas_log(mpt, CE_WARN, "mptsas_config_space_init failed");
1278 		goto fail;
1279 	}
1280 	config_setup++;
1281 
1282 	mem_bar = mpt->m_mem_bar;
1283 	if (ddi_regs_map_setup(dip, mem_bar, (caddr_t *)&mpt->m_reg,
1284 	    0, 0, &mpt->m_reg_acc_attr, &mpt->m_datap) != DDI_SUCCESS) {
1285 		mptsas_log(mpt, CE_WARN, "map setup failed");
1286 		goto fail;
1287 	}
1288 	map_setup++;
1289 
1290 	/*
1291 	 * A taskq is created for dealing with the event handler
1292 	 */
1293 	if ((mpt->m_event_taskq = ddi_taskq_create(dip, "mptsas_event_taskq",
1294 	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1295 		mptsas_log(mpt, CE_NOTE, "ddi_taskq_create failed");
1296 		goto fail;
1297 	}
1298 	event_taskq_create++;
1299 
1300 	/*
1301 	 * A taskq is created for dealing with dr events
1302 	 */
1303 	if ((mpt->m_dr_taskq = ddi_taskq_create(dip,
1304 	    "mptsas_dr_taskq",
1305 	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
1306 		mptsas_log(mpt, CE_NOTE, "ddi_taskq_create for discovery "
1307 		    "failed");
1308 		goto fail;
1309 	}
1310 	dr_taskq_create++;
1311 
1312 	mpt->m_doneq_thread_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1313 	    0, "mptsas_doneq_thread_threshold_prop", 10);
1314 	mpt->m_doneq_length_threshold = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1315 	    0, "mptsas_doneq_length_threshold_prop", 8);
1316 	mpt->m_doneq_thread_n = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1317 	    0, "mptsas_doneq_thread_n_prop", 8);
1318 
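	/*
	 * If configured, create a pool of worker threads that drain completed
	 * commands from the done queue so that completion processing does not
	 * have to run in interrupt context.
	 */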
1319 	if (mpt->m_doneq_thread_n) {
1320 		cv_init(&mpt->m_doneq_thread_cv, NULL, CV_DRIVER, NULL);
1321 		mutex_init(&mpt->m_doneq_mutex, NULL, MUTEX_DRIVER, NULL);
1322 
1323 		mutex_enter(&mpt->m_doneq_mutex);
1324 		mpt->m_doneq_thread_id =
1325 		    kmem_zalloc(sizeof (mptsas_doneq_thread_list_t)
1326 		    * mpt->m_doneq_thread_n, KM_SLEEP);
1327 
1328 		for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1329 			cv_init(&mpt->m_doneq_thread_id[j].cv, NULL,
1330 			    CV_DRIVER, NULL);
1331 			mutex_init(&mpt->m_doneq_thread_id[j].mutex, NULL,
1332 			    MUTEX_DRIVER, NULL);
1333 			mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1334 			mpt->m_doneq_thread_id[j].flag |=
1335 			    MPTSAS_DONEQ_THREAD_ACTIVE;
1336 			mpt->m_doneq_thread_id[j].arg.mpt = mpt;
1337 			mpt->m_doneq_thread_id[j].arg.t = j;
1338 			mpt->m_doneq_thread_id[j].threadp =
1339 			    thread_create(NULL, 0, mptsas_doneq_thread,
1340 			    &mpt->m_doneq_thread_id[j].arg,
1341 			    0, &p0, TS_RUN, minclsyspri);
1342 			mpt->m_doneq_thread_id[j].donetail =
1343 			    &mpt->m_doneq_thread_id[j].doneq;
1344 			mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1345 		}
1346 		mutex_exit(&mpt->m_doneq_mutex);
1347 		doneq_thread_create++;
1348 	}
1349 
1350 	/*
1351 	 * Disable hardware interrupt since we're not ready to
1352 	 * handle it yet.
1353 	 */
1354 	MPTSAS_DISABLE_INTR(mpt);
1355 	if (mptsas_register_intrs(mpt) == FALSE)
1356 		goto fail;
1357 	intr_added++;
1358 
1359 	/* Initialize mutex used in interrupt handler */
1360 	mutex_init(&mpt->m_mutex, NULL, MUTEX_DRIVER,
1361 	    DDI_INTR_PRI(mpt->m_intr_pri));
1362 	mutex_init(&mpt->m_passthru_mutex, NULL, MUTEX_DRIVER, NULL);
1363 	mutex_init(&mpt->m_tx_waitq_mutex, NULL, MUTEX_DRIVER,
1364 	    DDI_INTR_PRI(mpt->m_intr_pri));
1365 	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1366 		mutex_init(&mpt->m_phy_info[i].smhba_info.phy_mutex,
1367 		    NULL, MUTEX_DRIVER,
1368 		    DDI_INTR_PRI(mpt->m_intr_pri));
1369 	}
1370 
1371 	cv_init(&mpt->m_cv, NULL, CV_DRIVER, NULL);
1372 	cv_init(&mpt->m_passthru_cv, NULL, CV_DRIVER, NULL);
1373 	cv_init(&mpt->m_fw_cv, NULL, CV_DRIVER, NULL);
1374 	cv_init(&mpt->m_config_cv, NULL, CV_DRIVER, NULL);
1375 	cv_init(&mpt->m_fw_diag_cv, NULL, CV_DRIVER, NULL);
1376 	cv_init(&mpt->m_extreq_sense_refcount_cv, NULL, CV_DRIVER, NULL);
1377 	mutex_init_done++;
1378 
1379 	mutex_enter(&mpt->m_mutex);
1380 	/*
1381 	 * Initialize power management component
1382 	 */
1383 	if (mpt->m_options & MPTSAS_OPT_PM) {
1384 		if (mptsas_init_pm(mpt)) {
1385 			mutex_exit(&mpt->m_mutex);
1386 			mptsas_log(mpt, CE_WARN, "mptsas pm initialization "
1387 			    "failed");
1388 			goto fail;
1389 		}
1390 	}
1391 
1392 	/*
1393 	 * Initialize chip using Message Unit Reset, if allowed
1394 	 */
1395 	mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1396 	if (mptsas_init_chip(mpt, TRUE) == DDI_FAILURE) {
1397 		mutex_exit(&mpt->m_mutex);
1398 		mptsas_log(mpt, CE_WARN, "mptsas chip initialization failed");
1399 		goto fail;
1400 	}
1401 
1402 	mpt->m_targets = refhash_create(MPTSAS_TARGET_BUCKET_COUNT,
1403 	    mptsas_target_addr_hash, mptsas_target_addr_cmp,
1404 	    mptsas_target_free, sizeof (mptsas_target_t),
1405 	    offsetof(mptsas_target_t, m_link),
1406 	    offsetof(mptsas_target_t, m_addr), KM_SLEEP);
1407 
1408 	/*
1409 	 * The refhash for temporary targets uses the address of the target
1410 	 * struct itself as tag, so the tag offset is 0. See the implementation
1411 	 * of mptsas_tmp_target_hash() and mptsas_tmp_target_cmp().
1412 	 */
1413 	mpt->m_tmp_targets = refhash_create(MPTSAS_TMP_TARGET_BUCKET_COUNT,
1414 	    mptsas_tmp_target_hash, mptsas_tmp_target_cmp,
1415 	    mptsas_target_free, sizeof (mptsas_target_t),
1416 	    offsetof(mptsas_target_t, m_link), 0, KM_SLEEP);
1417 
1418 	/*
1419 	 * Fill in the phy_info structure and get the base WWID
1420 	 */
1421 	if (mptsas_get_manufacture_page5(mpt) == DDI_FAILURE) {
1422 		mptsas_log(mpt, CE_WARN,
1423 		    "mptsas_get_manufacture_page5 failed!");
1424 		goto fail;
1425 	}
1426 
1427 	if (mptsas_get_sas_io_unit_page_hndshk(mpt)) {
1428 		mptsas_log(mpt, CE_WARN,
1429 		    "mptsas_get_sas_io_unit_page_hndshk failed!");
1430 		goto fail;
1431 	}
1432 
1433 	if (mptsas_get_manufacture_page0(mpt) == DDI_FAILURE) {
1434 		mptsas_log(mpt, CE_WARN,
1435 		    "mptsas_get_manufacture_page0 failed!");
1436 		goto fail;
1437 	}
1438 
1439 	mutex_exit(&mpt->m_mutex);
1440 
1441 	/*
1442 	 * Register the iport for multiple port HBA
1443 	 */
1444 	mptsas_iport_register(mpt);
1445 
1446 	/*
1447 	 * initialize SCSI HBA transport structure
1448 	 */
1449 	if (mptsas_hba_setup(mpt) == FALSE)
1450 		goto fail;
1451 	hba_attach_setup++;
1452 
1453 	if (mptsas_smp_setup(mpt) == FALSE)
1454 		goto fail;
1455 	smp_attach_setup++;
1456 
1457 	if (mptsas_enc_setup(mpt) == FALSE)
1458 		goto fail;
1459 	enc_attach_setup++;
1460 
1461 	if (mptsas_cache_create(mpt) == FALSE)
1462 		goto fail;
1463 
1464 	mpt->m_scsi_reset_delay	= ddi_prop_get_int(DDI_DEV_T_ANY,
1465 	    dip, 0, "scsi-reset-delay",	SCSI_DEFAULT_RESET_DELAY);
1466 	if (mpt->m_scsi_reset_delay == 0) {
1467 		mptsas_log(mpt, CE_NOTE,
1468 		    "scsi_reset_delay of 0 is not recommended,"
1469 		    " resetting to SCSI_DEFAULT_RESET_DELAY\n");
1470 		mpt->m_scsi_reset_delay = SCSI_DEFAULT_RESET_DELAY;
1471 	}
1472 
1473 	/*
1474 	 * Initialize the wait and done FIFO queue
1475 	 */
1476 	mpt->m_donetail = &mpt->m_doneq;
1477 	mpt->m_waitqtail = &mpt->m_waitq;
1478 	mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
1479 	mpt->m_tx_draining = 0;
1480 
1481 	/*
1482 	 * ioc cmd queue initialize
1483 	 */
1484 	mpt->m_ioc_event_cmdtail = &mpt->m_ioc_event_cmdq;
1485 	mpt->m_dev_handle = 0xFFFF;
1486 
1487 	MPTSAS_ENABLE_INTR(mpt);
1488 
1489 	/*
1490 	 * enable event notification
1491 	 */
1492 	mutex_enter(&mpt->m_mutex);
1493 	if (mptsas_ioc_enable_event_notification(mpt)) {
1494 		mutex_exit(&mpt->m_mutex);
1495 		goto fail;
1496 	}
1497 	mutex_exit(&mpt->m_mutex);
1498 
1499 	/*
1500 	 * used for mptsas_watch
1501 	 */
1502 	mptsas_list_add(mpt);
1503 
1504 	mutex_enter(&mptsas_global_mutex);
1505 	if (mptsas_timeouts_enabled == 0) {
1506 		mptsas_scsi_watchdog_tick = ddi_prop_get_int(DDI_DEV_T_ANY,
1507 		    dip, 0, "scsi-watchdog-tick", DEFAULT_WD_TICK);
1508 
1509 		mptsas_tick = mptsas_scsi_watchdog_tick *
1510 		    drv_usectohz((clock_t)1000000);
1511 
1512 		mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
1513 		mptsas_timeouts_enabled = 1;
1514 	}
1515 	mutex_exit(&mptsas_global_mutex);
1516 	added_watchdog++;
1517 
1518 	/*
1519 	 * Initialize PHY info for smhba.
1520 	 * This requires the watchdog to be enabled; otherwise, if interrupts
1521 	 * don't work, the system will hang.
1522 	 */
1523 	if (mptsas_smhba_setup(mpt)) {
1524 		mptsas_log(mpt, CE_WARN, "mptsas phy initialization "
1525 		    "failed");
1526 		goto fail;
1527 	}
1528 
1529 	/* Check all dma handles allocated in attach */
1530 	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl)
1531 	    != DDI_SUCCESS) ||
1532 	    (mptsas_check_dma_handle(mpt->m_dma_req_sense_hdl)
1533 	    != DDI_SUCCESS) ||
1534 	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl)
1535 	    != DDI_SUCCESS) ||
1536 	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl)
1537 	    != DDI_SUCCESS) ||
1538 	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl)
1539 	    != DDI_SUCCESS) ||
1540 	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl)
1541 	    != DDI_SUCCESS)) {
1542 		goto fail;
1543 	}
1544 
1545 	/* Check all acc handles allocated in attach */
1546 	if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
1547 	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl)
1548 	    != DDI_SUCCESS) ||
1549 	    (mptsas_check_acc_handle(mpt->m_acc_req_sense_hdl)
1550 	    != DDI_SUCCESS) ||
1551 	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl)
1552 	    != DDI_SUCCESS) ||
1553 	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl)
1554 	    != DDI_SUCCESS) ||
1555 	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl)
1556 	    != DDI_SUCCESS) ||
1557 	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl)
1558 	    != DDI_SUCCESS) ||
1559 	    (mptsas_check_acc_handle(mpt->m_config_handle)
1560 	    != DDI_SUCCESS)) {
1561 		goto fail;
1562 	}
1563 
1564 	/*
1565 	 * After this point, we are not going to fail the attach.
1566 	 */
1567 
1568 	/* Let the UFM subsystem know we're ready to receive callbacks */
1569 	ddi_ufm_update(mpt->m_ufmh);
1570 
1571 	/* Print message of HBA present */
1572 	ddi_report_dev(dip);
1573 
1574 	/* report idle status to pm framework */
1575 	if (mpt->m_options & MPTSAS_OPT_PM) {
1576 		(void) pm_idle_component(dip, 0);
1577 	}
1578 
1579 	return (DDI_SUCCESS);
1580 
1581 fail:
1582 	mptsas_log(mpt, CE_WARN, "attach failed");
1583 	mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
1584 	ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
1585 	if (mpt) {
1586 		/* deallocate in reverse order */
1587 		if (added_watchdog) {
1588 			mptsas_list_del(mpt);
1589 			mutex_enter(&mptsas_global_mutex);
1590 
1591 			if (mptsas_timeout_id && (mptsas_head == NULL)) {
1592 				timeout_id_t tid = mptsas_timeout_id;
1593 				mptsas_timeouts_enabled = 0;
1594 				mptsas_timeout_id = 0;
1595 				mutex_exit(&mptsas_global_mutex);
1596 				(void) untimeout(tid);
1597 				mutex_enter(&mptsas_global_mutex);
1598 			}
1599 			mutex_exit(&mptsas_global_mutex);
1600 		}
1601 
1602 		mptsas_cache_destroy(mpt);
1603 
1604 		if (smp_attach_setup) {
1605 			mptsas_smp_teardown(mpt);
1606 		}
1607 		if (enc_attach_setup) {
1608 			mptsas_enc_teardown(mpt);
1609 		}
1610 		if (hba_attach_setup) {
1611 			mptsas_hba_teardown(mpt);
1612 		}
1613 
1614 		if (mpt->m_tmp_targets)
1615 			refhash_destroy(mpt->m_tmp_targets);
1616 		if (mpt->m_targets)
1617 			refhash_destroy(mpt->m_targets);
1618 		if (mpt->m_smp_targets)
1619 			refhash_destroy(mpt->m_smp_targets);
1620 
1621 		if (mpt->m_active) {
1622 			mptsas_free_active_slots(mpt);
1623 		}
1624 		if (intr_added) {
1625 			mptsas_unregister_intrs(mpt);
1626 		}
1627 
1628 		if (doneq_thread_create) {
1629 			mutex_enter(&mpt->m_doneq_mutex);
1630 			doneq_thread_num = mpt->m_doneq_thread_n;
1631 			for (j = 0; j < mpt->m_doneq_thread_n; j++) {
1632 				mutex_enter(&mpt->m_doneq_thread_id[j].mutex);
1633 				mpt->m_doneq_thread_id[j].flag &=
1634 				    (~MPTSAS_DONEQ_THREAD_ACTIVE);
1635 				cv_signal(&mpt->m_doneq_thread_id[j].cv);
1636 				mutex_exit(&mpt->m_doneq_thread_id[j].mutex);
1637 			}
1638 			while (mpt->m_doneq_thread_n) {
1639 				cv_wait(&mpt->m_doneq_thread_cv,
1640 				    &mpt->m_doneq_mutex);
1641 			}
1642 			for (j = 0; j < doneq_thread_num; j++) {
1643 				cv_destroy(&mpt->m_doneq_thread_id[j].cv);
1644 				mutex_destroy(&mpt->m_doneq_thread_id[j].mutex);
1645 			}
1646 			kmem_free(mpt->m_doneq_thread_id,
1647 			    sizeof (mptsas_doneq_thread_list_t)
1648 			    * doneq_thread_num);
1649 			mutex_exit(&mpt->m_doneq_mutex);
1650 			cv_destroy(&mpt->m_doneq_thread_cv);
1651 			mutex_destroy(&mpt->m_doneq_mutex);
1652 		}
1653 		if (event_taskq_create) {
1654 			ddi_taskq_destroy(mpt->m_event_taskq);
1655 		}
1656 		if (dr_taskq_create) {
1657 			ddi_taskq_destroy(mpt->m_dr_taskq);
1658 		}
1659 		if (mutex_init_done) {
1660 			mutex_destroy(&mpt->m_tx_waitq_mutex);
1661 			mutex_destroy(&mpt->m_passthru_mutex);
1662 			mutex_destroy(&mpt->m_mutex);
1663 			for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
1664 				mutex_destroy(
1665 				    &mpt->m_phy_info[i].smhba_info.phy_mutex);
1666 			}
1667 			cv_destroy(&mpt->m_cv);
1668 			cv_destroy(&mpt->m_passthru_cv);
1669 			cv_destroy(&mpt->m_fw_cv);
1670 			cv_destroy(&mpt->m_config_cv);
1671 			cv_destroy(&mpt->m_fw_diag_cv);
1672 			cv_destroy(&mpt->m_extreq_sense_refcount_cv);
1673 		}
1674 
1675 		if (map_setup) {
1676 			mptsas_cfg_fini(mpt);
1677 		}
1678 		if (config_setup) {
1679 			mptsas_config_space_fini(mpt);
1680 		}
1681 		mptsas_free_handshake_msg(mpt);
1682 		mptsas_hba_fini(mpt);
1683 
1684 		mptsas_fm_fini(mpt);
1685 		ddi_soft_state_free(mptsas_state, instance);
1686 		ddi_prop_remove_all(dip);
1687 	}
1688 	return (DDI_FAILURE);
1689 }
1690 
1691 static int
1692 mptsas_suspend(dev_info_t *devi)
1693 {
1694 	mptsas_t	*mpt, *g;
1695 	scsi_hba_tran_t	*tran;
1696 
1697 	if (scsi_hba_iport_unit_address(devi)) {
1698 		return (DDI_SUCCESS);
1699 	}
1700 
1701 	if ((tran = ddi_get_driver_private(devi)) == NULL)
1702 		return (DDI_SUCCESS);
1703 
1704 	mpt = TRAN2MPT(tran);
1705 	if (!mpt) {
1706 		return (DDI_SUCCESS);
1707 	}
1708 
1709 	mutex_enter(&mpt->m_mutex);
1710 
1711 	if (mpt->m_suspended++) {
1712 		mutex_exit(&mpt->m_mutex);
1713 		return (DDI_SUCCESS);
1714 	}
1715 
1716 	/*
1717 	 * Cancel timeout threads for this mpt
1718 	 */
1719 	if (mpt->m_quiesce_timeid) {
1720 		timeout_id_t tid = mpt->m_quiesce_timeid;
1721 		mpt->m_quiesce_timeid = 0;
1722 		mutex_exit(&mpt->m_mutex);
1723 		(void) untimeout(tid);
1724 		mutex_enter(&mpt->m_mutex);
1725 	}
1726 
1727 	if (mpt->m_restart_cmd_timeid) {
1728 		timeout_id_t tid = mpt->m_restart_cmd_timeid;
1729 		mpt->m_restart_cmd_timeid = 0;
1730 		mutex_exit(&mpt->m_mutex);
1731 		(void) untimeout(tid);
1732 		mutex_enter(&mpt->m_mutex);
1733 	}
1734 
1735 	mutex_exit(&mpt->m_mutex);
1736 
1737 	(void) pm_idle_component(mpt->m_dip, 0);
1738 
1739 	/*
1740 	 * Cancel watch threads if all mpts suspended
1741 	 */
1742 	rw_enter(&mptsas_global_rwlock, RW_WRITER);
1743 	for (g = mptsas_head; g != NULL; g = g->m_next) {
1744 		if (!g->m_suspended)
1745 			break;
1746 	}
1747 	rw_exit(&mptsas_global_rwlock);
1748 
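	/* g == NULL here means every mpt instance on the list is suspended. */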
1749 	mutex_enter(&mptsas_global_mutex);
1750 	if (g == NULL) {
1751 		timeout_id_t tid;
1752 
1753 		mptsas_timeouts_enabled = 0;
1754 		if (mptsas_timeout_id) {
1755 			tid = mptsas_timeout_id;
1756 			mptsas_timeout_id = 0;
1757 			mutex_exit(&mptsas_global_mutex);
1758 			(void) untimeout(tid);
1759 			mutex_enter(&mptsas_global_mutex);
1760 		}
1761 		if (mptsas_reset_watch) {
1762 			tid = mptsas_reset_watch;
1763 			mptsas_reset_watch = 0;
1764 			mutex_exit(&mptsas_global_mutex);
1765 			(void) untimeout(tid);
1766 			mutex_enter(&mptsas_global_mutex);
1767 		}
1768 	}
1769 	mutex_exit(&mptsas_global_mutex);
1770 
1771 	mutex_enter(&mpt->m_mutex);
1772 
1773 	/*
1774 	 * If this mpt is not at full power (PM_LEVEL_D0), just return.
1775 	 */
1776 	if ((mpt->m_options & MPTSAS_OPT_PM) &&
1777 	    (mpt->m_power_level != PM_LEVEL_D0)) {
1778 		mutex_exit(&mpt->m_mutex);
1779 		return (DDI_SUCCESS);
1780 	}
1781 
1782 	/* Disable HBA interrupts in hardware */
1783 	MPTSAS_DISABLE_INTR(mpt);
1784 	/*
1785 	 * Send RAID action system shutdown to sync IR
1786 	 */
1787 	mptsas_raid_action_system_shutdown(mpt);
1788 
1789 	mutex_exit(&mpt->m_mutex);
1790 
1791 	/* drain the taskq */
1792 	ddi_taskq_wait(mpt->m_event_taskq);
1793 	ddi_taskq_wait(mpt->m_dr_taskq);
1794 
1795 	return (DDI_SUCCESS);
1796 }
1797 
1798 #ifdef	__sparc
1799 /*ARGSUSED*/
1800 static int
1801 mptsas_reset(dev_info_t *devi, ddi_reset_cmd_t cmd)
1802 {
1803 	mptsas_t	*mpt;
1804 	scsi_hba_tran_t *tran;
1805 
1806 	/*
1807 	 * If this call is for iport, just return.
1808 	 */
1809 	if (scsi_hba_iport_unit_address(devi))
1810 		return (DDI_SUCCESS);
1811 
1812 	if ((tran = ddi_get_driver_private(devi)) == NULL)
1813 		return (DDI_SUCCESS);
1814 
1815 	if ((mpt = TRAN2MPT(tran)) == NULL)
1816 		return (DDI_SUCCESS);
1817 
1818 	/*
1819 	 * Send RAID action system shutdown to sync IR.  Disable HBA
1820 	 * interrupts in hardware first.
1821 	 */
1822 	MPTSAS_DISABLE_INTR(mpt);
1823 	mptsas_raid_action_system_shutdown(mpt);
1824 
1825 	return (DDI_SUCCESS);
1826 }
1827 #else /* __sparc */
1828 /*
1829  * quiesce(9E) entry point.
1830  *
1831  * This function is called when the system is single-threaded at high
1832  * PIL with preemption disabled.  Therefore, this function must not
1833  * block.
1834  *
1835  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
1836  * DDI_FAILURE indicates an error condition and should almost never happen.
1837  */
1838 static int
1839 mptsas_quiesce(dev_info_t *devi)
1840 {
1841 	mptsas_t	*mpt;
1842 	scsi_hba_tran_t *tran;
1843 
1844 	/*
1845 	 * If this call is for iport, just return.
1846 	 */
1847 	if (scsi_hba_iport_unit_address(devi))
1848 		return (DDI_SUCCESS);
1849 
1850 	if ((tran = ddi_get_driver_private(devi)) == NULL)
1851 		return (DDI_SUCCESS);
1852 
1853 	if ((mpt = TRAN2MPT(tran)) == NULL)
1854 		return (DDI_SUCCESS);
1855 
1856 	/* Disable HBA interrupts in hardware */
1857 	MPTSAS_DISABLE_INTR(mpt);
1858 	/* Send RAID action system shutdown to sync IR */
1859 	mptsas_raid_action_system_shutdown(mpt);
1860 
1861 	return (DDI_SUCCESS);
1862 }
1863 #endif	/* __sparc */
1864 
1865 /*
1866  * detach(9E).	Remove all device allocations and system resources;
1867  * disable device interrupts.
1868  * Return DDI_SUCCESS if done; DDI_FAILURE if there's a problem.
1869  */
1870 static int
1871 mptsas_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1872 {
1873 	/* CONSTCOND */
1874 	ASSERT(NO_COMPETING_THREADS);
1875 	NDBG0(("mptsas_detach: dip=0x%p cmd=0x%p", (void *)devi, (void *)cmd));
1876 
1877 	switch (cmd) {
1878 	case DDI_DETACH:
1879 		return (mptsas_do_detach(devi));
1880 
1881 	case DDI_SUSPEND:
1882 		return (mptsas_suspend(devi));
1883 
1884 	default:
1885 		return (DDI_FAILURE);
1886 	}
1887 	/* NOTREACHED */
1888 }
1889 
1890 static int
1891 mptsas_do_detach(dev_info_t *dip)
1892 {
1893 	mptsas_t	*mpt;
1894 	scsi_hba_tran_t	*tran;
1895 	mdi_pathinfo_t	*pip = NULL;
1896 	int		i;
1897 	int		doneq_thread_num = 0;
1898 
1899 	NDBG0(("mptsas_do_detach: dip=0x%p", (void *)dip));
1900 
1901 	if ((tran = ndi_flavorv_get(dip, SCSA_FLAVOR_SCSI_DEVICE)) == NULL)
1902 		return (DDI_FAILURE);
1903 
1904 	mpt = TRAN2MPT(tran);
1905 	if (!mpt) {
1906 		return (DDI_FAILURE);
1907 	}
1908 
1909 	ddi_ufm_fini(mpt->m_ufmh);
1910 
1911 	/*
1912 	 * Still have pathinfo child, should not detach mpt driver
1913 	 */
1914 	if (scsi_hba_iport_unit_address(dip)) {
1915 		if (mpt->m_mpxio_enable) {
1916 			/*
1917 			 * MPxIO enabled for the iport
1918 			 */
1919 			ndi_devi_enter(scsi_vhci_dip);
1920 			ndi_devi_enter(dip);
1921 			while ((pip = mdi_get_next_client_path(dip, NULL)) !=
1922 			    NULL) {
1923 				if (mdi_pi_free(pip, 0) == MDI_SUCCESS) {
1924 					continue;
1925 				}
1926 				ndi_devi_exit(dip);
1927 				ndi_devi_exit(scsi_vhci_dip);
1928 				NDBG12(("detach failed because of "
1929 				    "outstanding path info"));
1930 				return (DDI_FAILURE);
1931 			}
1932 			ndi_devi_exit(dip);
1933 			ndi_devi_exit(scsi_vhci_dip);
1934 			(void) mdi_phci_unregister(dip, 0);
1935 		}
1936 
1937 		ddi_prop_remove_all(dip);
1938 
1939 		return (DDI_SUCCESS);
1940 	}
1941 
1942 	/* Make sure power level is D0 before accessing registers */
1943 	if (mpt->m_options & MPTSAS_OPT_PM) {
1944 		(void) pm_busy_component(dip, 0);
1945 		if (mpt->m_power_level != PM_LEVEL_D0) {
1946 			if (pm_raise_power(dip, 0, PM_LEVEL_D0) !=
1947 			    DDI_SUCCESS) {
1948 				mptsas_log(mpt, CE_WARN,
1949 				    "mptsas%d: Raise power request failed.",
1950 				    mpt->m_instance);
1951 				(void) pm_idle_component(dip, 0);
1952 				return (DDI_FAILURE);
1953 			}
1954 		}
1955 	}
1956 
1957 	/*
1958 	 * Send RAID action system shutdown to sync IR.  After the action,
1959 	 * send a Message Unit Reset.  Since the DMA resources are freed after
1960 	 * that, putting the IOC into the READY state prevents HBA-initiated DMA.
1961 	 */
1962 	mutex_enter(&mpt->m_mutex);
1963 	MPTSAS_DISABLE_INTR(mpt);
1964 	mptsas_raid_action_system_shutdown(mpt);
1965 	mpt->m_softstate |= MPTSAS_SS_MSG_UNIT_RESET;
1966 	(void) mptsas_ioc_reset(mpt, FALSE);
1967 	mutex_exit(&mpt->m_mutex);
1968 	mptsas_rem_intrs(mpt);
1969 	ddi_taskq_destroy(mpt->m_event_taskq);
1970 	ddi_taskq_destroy(mpt->m_dr_taskq);
1971 
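	/*
	 * Shut down the doneq helper threads: clear each thread's ACTIVE
	 * flag and signal it, then wait until m_doneq_thread_n drops to
	 * zero before destroying the per-thread CVs/mutexes and freeing
	 * the thread list.
	 */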
1972 	if (mpt->m_doneq_thread_n) {
1973 		mutex_enter(&mpt->m_doneq_mutex);
1974 		doneq_thread_num = mpt->m_doneq_thread_n;
1975 		for (i = 0; i < mpt->m_doneq_thread_n; i++) {
1976 			mutex_enter(&mpt->m_doneq_thread_id[i].mutex);
1977 			mpt->m_doneq_thread_id[i].flag &=
1978 			    (~MPTSAS_DONEQ_THREAD_ACTIVE);
1979 			cv_signal(&mpt->m_doneq_thread_id[i].cv);
1980 			mutex_exit(&mpt->m_doneq_thread_id[i].mutex);
1981 		}
1982 		while (mpt->m_doneq_thread_n) {
1983 			cv_wait(&mpt->m_doneq_thread_cv,
1984 			    &mpt->m_doneq_mutex);
1985 		}
1986 		for (i = 0;  i < doneq_thread_num; i++) {
1987 			cv_destroy(&mpt->m_doneq_thread_id[i].cv);
1988 			mutex_destroy(&mpt->m_doneq_thread_id[i].mutex);
1989 		}
1990 		kmem_free(mpt->m_doneq_thread_id,
1991 		    sizeof (mptsas_doneq_thread_list_t)
1992 		    * doneq_thread_num);
1993 		mutex_exit(&mpt->m_doneq_mutex);
1994 		cv_destroy(&mpt->m_doneq_thread_cv);
1995 		mutex_destroy(&mpt->m_doneq_mutex);
1996 	}
1997 
1998 	scsi_hba_reset_notify_tear_down(mpt->m_reset_notify_listf);
1999 
2000 	mptsas_list_del(mpt);
2001 
2002 	/*
2003 	 * Cancel timeout threads for this mpt
2004 	 */
2005 	mutex_enter(&mpt->m_mutex);
2006 	if (mpt->m_quiesce_timeid) {
2007 		timeout_id_t tid = mpt->m_quiesce_timeid;
2008 		mpt->m_quiesce_timeid = 0;
2009 		mutex_exit(&mpt->m_mutex);
2010 		(void) untimeout(tid);
2011 		mutex_enter(&mpt->m_mutex);
2012 	}
2013 
2014 	if (mpt->m_restart_cmd_timeid) {
2015 		timeout_id_t tid = mpt->m_restart_cmd_timeid;
2016 		mpt->m_restart_cmd_timeid = 0;
2017 		mutex_exit(&mpt->m_mutex);
2018 		(void) untimeout(tid);
2019 		mutex_enter(&mpt->m_mutex);
2020 	}
2021 
2022 	mutex_exit(&mpt->m_mutex);
2023 
2024 	/*
2025 	 * If this was the last mpt on the list, cancel the watch threads.
2026 	 */
2027 	mutex_enter(&mptsas_global_mutex);
2028 	if (mptsas_head == NULL) {
2029 		timeout_id_t tid;
2030 		/*
2031 		 * Clear mptsas_timeouts_enabled so that the watch thread
2032 		 * gets restarted on DDI_ATTACH
2033 		 */
2034 		mptsas_timeouts_enabled = 0;
2035 		if (mptsas_timeout_id) {
2036 			tid = mptsas_timeout_id;
2037 			mptsas_timeout_id = 0;
2038 			mutex_exit(&mptsas_global_mutex);
2039 			(void) untimeout(tid);
2040 			mutex_enter(&mptsas_global_mutex);
2041 		}
2042 		if (mptsas_reset_watch) {
2043 			tid = mptsas_reset_watch;
2044 			mptsas_reset_watch = 0;
2045 			mutex_exit(&mptsas_global_mutex);
2046 			(void) untimeout(tid);
2047 			mutex_enter(&mptsas_global_mutex);
2048 		}
2049 	}
2050 	mutex_exit(&mptsas_global_mutex);
2051 
2052 	/*
2053 	 * Delete Phy stats
2054 	 */
2055 	mptsas_destroy_phy_stats(mpt);
2056 
2057 	mptsas_destroy_hashes(mpt);
2058 
2059 	/*
2060 	 * Free the active command slots.
2061 	 */
2062 	mutex_enter(&mpt->m_mutex);
2063 	mptsas_free_active_slots(mpt);
2064 	mutex_exit(&mpt->m_mutex);
2065 
2066 	/* deallocate everything that was allocated in mptsas_attach */
2067 	mptsas_cache_destroy(mpt);
2068 
2069 	mptsas_hba_fini(mpt);
2070 	mptsas_cfg_fini(mpt);
2071 
2072 	/* Lower the power informing PM Framework */
2073 	if (mpt->m_options & MPTSAS_OPT_PM) {
2074 		if (pm_lower_power(dip, 0, PM_LEVEL_D3) != DDI_SUCCESS)
2075 			mptsas_log(mpt, CE_WARN,
2076 			    "!mptsas%d: Lower power request failed "
2077 			    "during detach, ignoring.",
2078 			    mpt->m_instance);
2079 	}
2080 
2081 	mutex_destroy(&mpt->m_tx_waitq_mutex);
2082 	mutex_destroy(&mpt->m_passthru_mutex);
2083 	mutex_destroy(&mpt->m_mutex);
2084 	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
2085 		mutex_destroy(&mpt->m_phy_info[i].smhba_info.phy_mutex);
2086 	}
2087 	cv_destroy(&mpt->m_cv);
2088 	cv_destroy(&mpt->m_passthru_cv);
2089 	cv_destroy(&mpt->m_fw_cv);
2090 	cv_destroy(&mpt->m_config_cv);
2091 	cv_destroy(&mpt->m_fw_diag_cv);
2092 	cv_destroy(&mpt->m_extreq_sense_refcount_cv);
2093 
2094 	mptsas_smp_teardown(mpt);
2095 	mptsas_enc_teardown(mpt);
2096 	mptsas_hba_teardown(mpt);
2097 
2098 	mptsas_config_space_fini(mpt);
2099 
2100 	mptsas_free_handshake_msg(mpt);
2101 
2102 	mptsas_fm_fini(mpt);
2103 	ddi_soft_state_free(mptsas_state, ddi_get_instance(dip));
2104 	ddi_prop_remove_all(dip);
2105 
2106 	return (DDI_SUCCESS);
2107 }
2108 
2109 static void
2110 mptsas_list_add(mptsas_t *mpt)
2111 {
2112 	rw_enter(&mptsas_global_rwlock, RW_WRITER);
2113 
2114 	if (mptsas_head == NULL) {
2115 		mptsas_head = mpt;
2116 	} else {
2117 		mptsas_tail->m_next = mpt;
2118 	}
2119 	mptsas_tail = mpt;
2120 	rw_exit(&mptsas_global_rwlock);
2121 }
2122 
2123 static void
2124 mptsas_list_del(mptsas_t *mpt)
2125 {
2126 	mptsas_t *m;
2127 	/*
2128 	 * Remove device instance from the global linked list
2129 	 */
2130 	rw_enter(&mptsas_global_rwlock, RW_WRITER);
2131 	if (mptsas_head == mpt) {
2132 		m = mptsas_head = mpt->m_next;
2133 	} else {
2134 		for (m = mptsas_head; m != NULL; m = m->m_next) {
2135 			if (m->m_next == mpt) {
2136 				m->m_next = mpt->m_next;
2137 				break;
2138 			}
2139 		}
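		/* The instance must be on the list; a lookup miss is fatal. */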
2140 		if (m == NULL) {
2141 			mptsas_log(mpt, CE_PANIC, "Not in softc list!");
2142 		}
2143 	}
2144 
2145 	if (mptsas_tail == mpt) {
2146 		mptsas_tail = m;
2147 	}
2148 	rw_exit(&mptsas_global_rwlock);
2149 }
2150 
2151 static int
2152 mptsas_alloc_handshake_msg(mptsas_t *mpt, size_t alloc_size)
2153 {
2154 	ddi_dma_attr_t	task_dma_attrs;
2155 
2156 	mpt->m_hshk_dma_size = 0;
2157 	task_dma_attrs = mpt->m_msg_dma_attr;
2158 	task_dma_attrs.dma_attr_sgllen = 1;
2159 	task_dma_attrs.dma_attr_granular = (uint32_t)(alloc_size);
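	/* A single S/G element keeps the handshake buffer contiguous for DMA. */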
2160 
2161 	/* allocate Task Management ddi_dma resources */
2162 	if (mptsas_dma_addr_create(mpt, task_dma_attrs,
2163 	    &mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl, &mpt->m_hshk_memp,
2164 	    alloc_size, NULL) == FALSE) {
2165 		return (DDI_FAILURE);
2166 	}
2167 	mpt->m_hshk_dma_size = alloc_size;
2168 
2169 	return (DDI_SUCCESS);
2170 }
2171 
2172 static void
2173 mptsas_free_handshake_msg(mptsas_t *mpt)
2174 {
2175 	if (mpt->m_hshk_dma_size == 0)
2176 		return;
2177 	mptsas_dma_addr_destroy(&mpt->m_hshk_dma_hdl, &mpt->m_hshk_acc_hdl);
2178 	mpt->m_hshk_dma_size = 0;
2179 }
2180 
2181 static int
2182 mptsas_hba_setup(mptsas_t *mpt)
2183 {
2184 	scsi_hba_tran_t		*hba_tran;
2185 	int			tran_flags;
2186 
2187 	/* Allocate a transport structure */
2188 	hba_tran = mpt->m_tran = scsi_hba_tran_alloc(mpt->m_dip,
2189 	    SCSI_HBA_CANSLEEP);
2190 	ASSERT(mpt->m_tran != NULL);
2191 
2192 	hba_tran->tran_hba_private	= mpt;
2193 	hba_tran->tran_tgt_private	= NULL;
2194 
2195 	hba_tran->tran_tgt_init		= mptsas_scsi_tgt_init;
2196 	hba_tran->tran_tgt_free		= mptsas_scsi_tgt_free;
2197 
2198 	hba_tran->tran_start		= mptsas_scsi_start;
2199 	hba_tran->tran_reset		= mptsas_scsi_reset;
2200 	hba_tran->tran_abort		= mptsas_scsi_abort;
2201 	hba_tran->tran_getcap		= mptsas_scsi_getcap;
2202 	hba_tran->tran_setcap		= mptsas_scsi_setcap;
2203 	hba_tran->tran_init_pkt		= mptsas_scsi_init_pkt;
2204 	hba_tran->tran_destroy_pkt	= mptsas_scsi_destroy_pkt;
2205 
2206 	hba_tran->tran_dmafree		= mptsas_scsi_dmafree;
2207 	hba_tran->tran_sync_pkt		= mptsas_scsi_sync_pkt;
2208 	hba_tran->tran_reset_notify	= mptsas_scsi_reset_notify;
2209 
2210 	hba_tran->tran_get_bus_addr	= mptsas_get_bus_addr;
2211 	hba_tran->tran_get_name		= mptsas_get_name;
2212 
2213 	hba_tran->tran_quiesce		= mptsas_scsi_quiesce;
2214 	hba_tran->tran_unquiesce	= mptsas_scsi_unquiesce;
2215 	hba_tran->tran_bus_reset	= NULL;
2216 
2217 	hba_tran->tran_add_eventcall	= NULL;
2218 	hba_tran->tran_get_eventcookie	= NULL;
2219 	hba_tran->tran_post_event	= NULL;
2220 	hba_tran->tran_remove_eventcall	= NULL;
2221 
2222 	hba_tran->tran_bus_config	= mptsas_bus_config;
2223 
2224 	hba_tran->tran_interconnect_type = INTERCONNECT_SAS;
2225 
2226 	/*
2227 	 * All children of the HBA are iports.  We need the tran to be
2228 	 * cloned, so pass SCSI_HBA_TRAN_CLONE to SCSA; the flag is then
2229 	 * inherited by each iport's tran vector.
2230 	 */
2231 	tran_flags = (SCSI_HBA_HBA | SCSI_HBA_TRAN_CLONE);
2232 
2233 	if (scsi_hba_attach_setup(mpt->m_dip, &mpt->m_msg_dma_attr,
2234 	    hba_tran, tran_flags) != DDI_SUCCESS) {
2235 		mptsas_log(mpt, CE_WARN, "hba attach setup failed");
2236 		scsi_hba_tran_free(hba_tran);
2237 		mpt->m_tran = NULL;
2238 		return (FALSE);
2239 	}
2240 	return (TRUE);
2241 }
2242 
2243 static void
2244 mptsas_hba_teardown(mptsas_t *mpt)
2245 {
2246 	(void) scsi_hba_detach(mpt->m_dip);
2247 	if (mpt->m_tran != NULL) {
2248 		scsi_hba_tran_free(mpt->m_tran);
2249 		mpt->m_tran = NULL;
2250 	}
2251 }
2252 
2253 static void
2254 mptsas_iport_register(mptsas_t *mpt)
2255 {
2256 	int i, j;
2257 	mptsas_phymask_t	mask = 0x0;
2258 	/*
2259 	 * initial value of mask is 0
2260 	 */
2261 	mutex_enter(&mpt->m_mutex);
2262 	for (i = 0; i < mpt->m_num_phys; i++) {
2263 		mptsas_phymask_t phy_mask = 0x0;
2264 		char phy_mask_name[MPTSAS_MAX_PHYS];
2265 		uint8_t current_port;
2266 
2267 		if (mpt->m_phy_info[i].attached_devhdl == 0)
2268 			continue;
2269 
2270 		bzero(phy_mask_name, sizeof (phy_mask_name));
2271 
2272 		current_port = mpt->m_phy_info[i].port_num;
2273 
2274 		if ((mask & (1 << i)) != 0)
2275 			continue;
2276 
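		/*
		 * Gather every PHY attached to the same controller port into
		 * one phymask, record that mask on each member PHY, and
		 * register one iport per wide port below.
		 */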
2277 		for (j = 0; j < mpt->m_num_phys; j++) {
2278 			if (mpt->m_phy_info[j].attached_devhdl &&
2279 			    (mpt->m_phy_info[j].port_num == current_port)) {
2280 				phy_mask |= (1 << j);
2281 			}
2282 		}
2283 		mask = mask | phy_mask;
2284 
2285 		for (j = 0; j < mpt->m_num_phys; j++) {
2286 			if ((phy_mask >> j) & 0x01) {
2287 				mpt->m_phy_info[j].phy_mask = phy_mask;
2288 			}
2289 		}
2290 
2291 		(void) sprintf(phy_mask_name, "%x", phy_mask);
2292 
2293 		mutex_exit(&mpt->m_mutex);
2294 		/*
2295 		 * register an iport
2296 		 */
2297 		(void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
2298 		mutex_enter(&mpt->m_mutex);
2299 	}
2300 	mutex_exit(&mpt->m_mutex);
2301 	/*
2302 	 * always register a virtual port for the RAID volume
2303 	 */
2304 	(void) scsi_hba_iport_register(mpt->m_dip, "v0");
2305 
2306 }
2307 
2308 static int
2309 mptsas_smp_setup(mptsas_t *mpt)
2310 {
2311 	mpt->m_smptran = smp_hba_tran_alloc(mpt->m_dip);
2312 	ASSERT(mpt->m_smptran != NULL);
2313 	mpt->m_smptran->smp_tran_hba_private = mpt;
2314 	mpt->m_smptran->smp_tran_start = mptsas_smp_start;
2315 	if (smp_hba_attach_setup(mpt->m_dip, mpt->m_smptran) != DDI_SUCCESS) {
2316 		mptsas_log(mpt, CE_WARN, "smp attach setup failed");
2317 		smp_hba_tran_free(mpt->m_smptran);
2318 		mpt->m_smptran = NULL;
2319 		return (FALSE);
2320 	}
2321 	/*
2322 	 * Initialize smp hash table
2323 	 */
2324 	mpt->m_smp_targets = refhash_create(MPTSAS_SMP_BUCKET_COUNT,
2325 	    mptsas_target_addr_hash, mptsas_target_addr_cmp,
2326 	    mptsas_smp_free, sizeof (mptsas_smp_t),
2327 	    offsetof(mptsas_smp_t, m_link), offsetof(mptsas_smp_t, m_addr),
2328 	    KM_SLEEP);
2329 	mpt->m_smp_devhdl = 0xFFFF;
2330 
2331 	return (TRUE);
2332 }
2333 
2334 static void
2335 mptsas_smp_teardown(mptsas_t *mpt)
2336 {
2337 	(void) smp_hba_detach(mpt->m_dip);
2338 	if (mpt->m_smptran != NULL) {
2339 		smp_hba_tran_free(mpt->m_smptran);
2340 		mpt->m_smptran = NULL;
2341 	}
2342 	mpt->m_smp_devhdl = 0;
2343 }
2344 
2345 static int
2346 mptsas_enc_setup(mptsas_t *mpt)
2347 {
2348 	list_create(&mpt->m_enclosures, sizeof (mptsas_enclosure_t),
2349 	    offsetof(mptsas_enclosure_t, me_link));
2350 	return (TRUE);
2351 }
2352 
2353 static void
2354 mptsas_enc_free(mptsas_enclosure_t *mep)
2355 {
2356 	if (mep == NULL)
2357 		return;
2358 	if (mep->me_slotleds != NULL) {
2359 		VERIFY3U(mep->me_nslots, >, 0);
2360 		kmem_free(mep->me_slotleds, sizeof (uint8_t) * mep->me_nslots);
2361 	}
2362 	kmem_free(mep, sizeof (mptsas_enclosure_t));
2363 }
2364 
2365 static void
2366 mptsas_enc_teardown(mptsas_t *mpt)
2367 {
2368 	mptsas_enclosure_t *mep;
2369 
2370 	while ((mep = list_remove_head(&mpt->m_enclosures)) != NULL) {
2371 		mptsas_enc_free(mep);
2372 	}
2373 	list_destroy(&mpt->m_enclosures);
2374 }
2375 
2376 static mptsas_enclosure_t *
2377 mptsas_enc_lookup(mptsas_t *mpt, uint16_t hdl)
2378 {
2379 	mptsas_enclosure_t *mep;
2380 
2381 	ASSERT(MUTEX_HELD(&mpt->m_mutex));
2382 
2383 	for (mep = list_head(&mpt->m_enclosures); mep != NULL;
2384 	    mep = list_next(&mpt->m_enclosures, mep)) {
2385 		if (hdl == mep->me_enchdl) {
2386 			return (mep);
2387 		}
2388 	}
2389 
2390 	return (NULL);
2391 }
2392 
2393 static int
2394 mptsas_cache_create(mptsas_t *mpt)
2395 {
2396 	int instance = mpt->m_instance;
2397 	char buf[64];
2398 
2399 	/*
2400 	 * create kmem cache for packets
2401 	 */
2402 	(void) sprintf(buf, "mptsas%d_cache", instance);
2403 	mpt->m_kmem_cache = kmem_cache_create(buf,
2404 	    sizeof (struct mptsas_cmd) + scsi_pkt_size(), 8,
2405 	    mptsas_kmem_cache_constructor, mptsas_kmem_cache_destructor,
2406 	    NULL, (void *)mpt, NULL, 0);
2407 
2408 	if (mpt->m_kmem_cache == NULL) {
2409 		mptsas_log(mpt, CE_WARN, "creating kmem cache failed");
2410 		return (FALSE);
2411 	}
2412 
2413 	/*
2414 	 * create kmem cache for extra SGL frames if the SGL cannot
2415 	 * be accommodated in the main request frame.
2416 	 */
2417 	(void) sprintf(buf, "mptsas%d_cache_frames", instance);
2418 	mpt->m_cache_frames = kmem_cache_create(buf,
2419 	    sizeof (mptsas_cache_frames_t), 8,
2420 	    mptsas_cache_frames_constructor, mptsas_cache_frames_destructor,
2421 	    NULL, (void *)mpt, NULL, 0);
2422 
2423 	if (mpt->m_cache_frames == NULL) {
2424 		mptsas_log(mpt, CE_WARN, "creating cache for frames failed");
2425 		return (FALSE);
2426 	}
2427 
2428 	return (TRUE);
2429 }
2430 
2431 static void
2432 mptsas_cache_destroy(mptsas_t *mpt)
2433 {
2434 	/* deallocate in reverse order */
2435 	if (mpt->m_cache_frames) {
2436 		kmem_cache_destroy(mpt->m_cache_frames);
2437 		mpt->m_cache_frames = NULL;
2438 	}
2439 	if (mpt->m_kmem_cache) {
2440 		kmem_cache_destroy(mpt->m_kmem_cache);
2441 		mpt->m_kmem_cache = NULL;
2442 	}
2443 }
2444 
2445 static int
2446 mptsas_power(dev_info_t *dip, int component, int level)
2447 {
2448 #ifndef __lock_lint
2449 	_NOTE(ARGUNUSED(component))
2450 #endif
2451 	mptsas_t	*mpt;
2452 	int		rval = DDI_SUCCESS;
2453 	int		polls = 0;
2454 	uint32_t	ioc_status;
2455 
2456 	if (scsi_hba_iport_unit_address(dip) != 0)
2457 		return (DDI_SUCCESS);
2458 
2459 	mpt = ddi_get_soft_state(mptsas_state, ddi_get_instance(dip));
2460 	if (mpt == NULL) {
2461 		return (DDI_FAILURE);
2462 	}
2463 
2464 	mutex_enter(&mpt->m_mutex);
2465 
2466 	/*
2467 	 * If the device is busy, don't lower its power level
2468 	 */
2469 	if (mpt->m_busy && (mpt->m_power_level > level)) {
2470 		mutex_exit(&mpt->m_mutex);
2471 		return (DDI_FAILURE);
2472 	}
2473 	switch (level) {
2474 	case PM_LEVEL_D0:
2475 		NDBG11(("mptsas%d: turning power ON.", mpt->m_instance));
2476 		MPTSAS_POWER_ON(mpt);
2477 		/*
2478 		 * Wait up to 30 seconds for IOC to come out of reset.
2479 		 */
2480 		while (((ioc_status = mptsas_hirrd(mpt,
2481 		    &mpt->m_reg->Doorbell)) &
2482 		    MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
2483 			if (polls++ > 3000) {
2484 				break;
2485 			}
2486 			delay(drv_usectohz(10000));
2487 		}
2488 		/*
2489 		 * If IOC is not in operational state, try to hard reset it.
2490 		 */
2491 		if ((ioc_status & MPI2_IOC_STATE_MASK) !=
2492 		    MPI2_IOC_STATE_OPERATIONAL) {
2493 			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
2494 			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
2495 				mptsas_log(mpt, CE_WARN,
2496 				    "mptsas_power: hard reset failed");
2497 				mutex_exit(&mpt->m_mutex);
2498 				return (DDI_FAILURE);
2499 			}
2500 		}
2501 		mpt->m_power_level = PM_LEVEL_D0;
2502 		break;
2503 	case PM_LEVEL_D3:
2504 		NDBG11(("mptsas%d: turning power OFF.", mpt->m_instance));
2505 		MPTSAS_POWER_OFF(mpt);
2506 		break;
2507 	default:
2508 		mptsas_log(mpt, CE_WARN, "mptsas%d: unknown power level <%x>.",
2509 		    mpt->m_instance, level);
2510 		rval = DDI_FAILURE;
2511 		break;
2512 	}
2513 	mutex_exit(&mpt->m_mutex);
2514 	return (rval);
2515 }
2516 
2517 /*
2518  * Check for newer v2.6 SAS chips.
2519  */
2520 static void
2521 mptsas_ioc_check_rev(mptsas_t *mpt)
2522 {
2523 	switch (mpt->m_devid) {
2524 	case MPI26_MFGPAGE_DEVID_SAS3816:
2525 	case MPI26_MFGPAGE_DEVID_SAS3816_1:
2526 		mpt->m_is_sea_ioc = 1;
2527 		mptsas_log(mpt, CE_NOTE, "!mptsas3%d: SAS3816 IOC Detected",
2528 		    mpt->m_instance);
2529 		/* fallthrough */
2530 	case MPI26_MFGPAGE_DEVID_SAS3616:
2531 	case MPI26_MFGPAGE_DEVID_SAS3708:
2532 	case MPI26_MFGPAGE_DEVID_SAS3716:
2533 		mptsas_log(mpt, CE_NOTE, "!mptsas3%d: gen3.5 IOC Detected",
2534 		    mpt->m_instance);
2535 		mpt->m_is_gen35_ioc = 1;
2536 		break;
2537 	default:
2538 		break;
2539 	}
2540 }
2541 
2542 /*
2543  * Search through the reg property for the first memory BAR.
2544  */
2545 static void
2546 mptsas_find_mem_bar(mptsas_t *mpt)
2547 {
2548 	int		i, rcount;
2549 	pci_regspec_t	*reg_data;
2550 	int		reglen;
2551 
2552 	mpt->m_mem_bar = MEM_SPACE; /* old default */
2553 	/*
2554 	 * Lookup the 'reg' property.
2555 	 */
2556 	if (ddi_getlongprop(DDI_DEV_T_ANY, mpt->m_dip,
2557 	    DDI_PROP_DONTPASS, "reg", (caddr_t)&reg_data, &reglen) ==
2558 	    DDI_PROP_SUCCESS) {
2559 		rcount = reglen / sizeof (pci_regspec_t);
2560 		for (i = 0; i < rcount; i++) {
2561 			if (PCI_REG_ADDR_G(reg_data[i].pci_phys_hi) ==
2562 			    PCI_REG_ADDR_G(PCI_ADDR_MEM64)) {
2563 				mpt->m_mem_bar = i;
2564 				break;
2565 			}
2566 		}
		/* ddi_getlongprop(9F) allocated reg_data; release it. */
		kmem_free(reg_data, reglen);
2567 	}
2568 }
2569 
2570 
2571 /*
2572  * Initialize configuration space and figure out which
2573  * chip and revision of the chip the mpt driver is using.
2574  */
2575 static int
2576 mptsas_config_space_init(mptsas_t *mpt)
2577 {
2578 	NDBG0(("mptsas_config_space_init"));
2579 
2580 	if (mpt->m_config_handle != NULL)
2581 		return (TRUE);
2582 
2583 	if (pci_config_setup(mpt->m_dip,
2584 	    &mpt->m_config_handle) != DDI_SUCCESS) {
2585 		mptsas_log(mpt, CE_WARN, "cannot map configuration space.");
2586 		return (FALSE);
2587 	}
2588 
2589 	/*
2590 	 * This is a workaround for an XMITS ASIC bug which does not
2591 	 * drive the CBE upper bits.
2592 	 */
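	/*
	 * The PCI status register is write-one-to-clear, so writing the
	 * PCI_STAT_PERROR bit back clears the latched parity error.
	 */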
2593 	if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT) &
2594 	    PCI_STAT_PERROR) {
2595 		pci_config_put16(mpt->m_config_handle, PCI_CONF_STAT,
2596 		    PCI_STAT_PERROR);
2597 	}
2598 
2599 	mptsas_setup_cmd_reg(mpt);
2600 
2601 	/*
2602 	 * Get the chip device id:
2603 	 */
2604 	mpt->m_devid = pci_config_get16(mpt->m_config_handle, PCI_CONF_DEVID);
2605 
2606 	/*
2607 	 * Save the revision.
2608 	 */
2609 	mpt->m_revid = pci_config_get8(mpt->m_config_handle, PCI_CONF_REVID);
2610 
2611 	/*
2612 	 * Save the SubSystem Vendor and Device IDs
2613 	 */
2614 	mpt->m_svid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBVENID);
2615 	mpt->m_ssid = pci_config_get16(mpt->m_config_handle, PCI_CONF_SUBSYSID);
2616 
2617 	/*
2618 	 * Set the latency timer to 0x40 as specified by the upa -> pci
2619 	 * bridge chip design team.  This may be done by the sparc pci
2620 	 * bus nexus driver, but the driver should make sure the latency
2621 	 * timer is correct for performance reasons.
2622 	 */
2623 	pci_config_put8(mpt->m_config_handle, PCI_CONF_LATENCY_TIMER,
2624 	    MPTSAS_LATENCY_TIMER);
2625 
2626 	mptsas_ioc_check_rev(mpt);
2627 	mptsas_find_mem_bar(mpt);
2628 	(void) mptsas_get_pci_cap(mpt);
2629 	return (TRUE);
2630 }
2631 
2632 static void
2633 mptsas_config_space_fini(mptsas_t *mpt)
2634 {
2635 	if (mpt->m_config_handle != NULL) {
2636 		mptsas_disable_bus_master(mpt);
2637 		pci_config_teardown(&mpt->m_config_handle);
2638 		mpt->m_config_handle = NULL;
2639 	}
2640 }
2641 
2642 static void
2643 mptsas_setup_cmd_reg(mptsas_t *mpt)
2644 {
2645 	ushort_t	cmdreg;
2646 
2647 	/*
2648 	 * Set the command register to the needed values.
2649 	 */
2650 	cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2651 	cmdreg |= (PCI_COMM_ME | PCI_COMM_SERR_ENABLE |
2652 	    PCI_COMM_PARITY_DETECT | PCI_COMM_MAE);
2653 	cmdreg &= ~PCI_COMM_IO;
2654 	pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2655 }
2656 
2657 static void
2658 mptsas_disable_bus_master(mptsas_t *mpt)
2659 {
2660 	ushort_t	cmdreg;
2661 
2662 	/*
2663 	 * Clear the master enable bit in the PCI command register.
2664 	 * This prevents any bus mastering activity like DMA.
2665 	 */
2666 	cmdreg = pci_config_get16(mpt->m_config_handle, PCI_CONF_COMM);
2667 	cmdreg &= ~PCI_COMM_ME;
2668 	pci_config_put16(mpt->m_config_handle, PCI_CONF_COMM, cmdreg);
2669 }
2670 
2671 int
2672 mptsas_dma_alloc(mptsas_t *mpt, mptsas_dma_alloc_state_t *dma_statep)
2673 {
2674 	ddi_dma_attr_t	attrs;
2675 
2676 	attrs = mpt->m_io_dma_attr;
2677 	attrs.dma_attr_sgllen = 1;
2678 
2679 	ASSERT(dma_statep != NULL);
2680 
2681 	if (mptsas_dma_addr_create(mpt, attrs, &dma_statep->handle,
2682 	    &dma_statep->accessp, &dma_statep->memp, dma_statep->size,
2683 	    &dma_statep->cookie) == FALSE) {
2684 		return (DDI_FAILURE);
2685 	}
2686 
2687 	return (DDI_SUCCESS);
2688 }
2689 
2690 void
2691 mptsas_dma_free(mptsas_dma_alloc_state_t *dma_statep)
2692 {
2693 	ASSERT(dma_statep != NULL);
2694 	mptsas_dma_addr_destroy(&dma_statep->handle, &dma_statep->accessp);
2695 	dma_statep->size = 0;
2696 }
2697 
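/*
 * mptsas_do_dma(): allocate a temporary DMA buffer of 'size' bytes, call
 * 'callback' with the mapped address, then check the DMA and access handles
 * and release the buffer.  Returns the callback's result, or DDI_FAILURE if
 * the allocation or a handle check fails.
 */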
2698 int
2699 mptsas_do_dma(mptsas_t *mpt, uint32_t size, int var, int (*callback)())
2700 {
2701 	ddi_dma_attr_t		attrs;
2702 	ddi_dma_handle_t	dma_handle;
2703 	caddr_t			memp;
2704 	ddi_acc_handle_t	accessp;
2705 	int			rval;
2706 
2707 	ASSERT(mutex_owned(&mpt->m_mutex));
2708 
2709 	attrs = mpt->m_msg_dma_attr;
2710 	attrs.dma_attr_sgllen = 1;
2711 	attrs.dma_attr_granular = size;
2712 
2713 	if (mptsas_dma_addr_create(mpt, attrs, &dma_handle,
2714 	    &accessp, &memp, size, NULL) == FALSE) {
2715 		return (DDI_FAILURE);
2716 	}
2717 
2718 	rval = (*callback) (mpt, memp, var, accessp);
2719 
2720 	if ((mptsas_check_dma_handle(dma_handle) != DDI_SUCCESS) ||
2721 	    (mptsas_check_acc_handle(accessp) != DDI_SUCCESS)) {
2722 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
2723 		rval = DDI_FAILURE;
2724 	}
2725 
2726 	mptsas_dma_addr_destroy(&dma_handle, &accessp);
2727 	return (rval);
2728 
2729 }
2730 
2731 static int
2732 mptsas_alloc_request_frames(mptsas_t *mpt)
2733 {
2734 	ddi_dma_attr_t		frame_dma_attrs;
2735 	caddr_t			memp;
2736 	ddi_dma_cookie_t	cookie;
2737 	size_t			mem_size;
2738 
2739 	/*
2740 	 * If already allocated, free the old allocation before re-allocating.
2741 	 */
2742 	if (mpt->m_dma_req_frame_hdl)
2743 		mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
2744 		    &mpt->m_acc_req_frame_hdl);
2745 
2746 	/*
2747 	 * The size of the request frame pool is:
2748 	 *   Number of Request Frames * Request Frame Size
2749 	 */
2750 	mem_size = mpt->m_max_requests * mpt->m_req_frame_size;
2751 
2752 	/*
2753 	 * set the DMA attributes.  System Request Message Frames must be
2754 	 * aligned on a 16-byte boundary.
2755 	 */
2756 	frame_dma_attrs = mpt->m_msg_dma_attr;
2757 	frame_dma_attrs.dma_attr_align = 16;
2758 	frame_dma_attrs.dma_attr_sgllen = 1;
2759 
2760 	/*
2761 	 * allocate the request frame pool.
2762 	 */
2763 	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2764 	    &mpt->m_dma_req_frame_hdl, &mpt->m_acc_req_frame_hdl, &memp,
2765 	    mem_size, &cookie) == FALSE) {
2766 		return (DDI_FAILURE);
2767 	}
2768 
2769 	/*
2770 	 * Store the request frame memory address.  This chip uses this
2771 	 * address to dma to and from the driver's frame.  The second
2772 	 * address is the address mpt uses to fill in the frame.
2773 	 */
2774 	mpt->m_req_frame_dma_addr = cookie.dmac_laddress;
2775 	mpt->m_req_frame = memp;
2776 
2777 	/*
2778 	 * Clear the request frame pool.
2779 	 */
2780 	bzero(mpt->m_req_frame, mem_size);
2781 
2782 	return (DDI_SUCCESS);
2783 }
2784 
2785 static int
2786 mptsas_alloc_sense_bufs(mptsas_t *mpt)
2787 {
2788 	ddi_dma_attr_t		sense_dma_attrs;
2789 	caddr_t			memp;
2790 	ddi_dma_cookie_t	cookie;
2791 	size_t			mem_size;
2792 	int			num_extrqsense_bufs;
2793 
2794 	ASSERT(mpt->m_extreq_sense_refcount == 0);
2795 
2796 	/*
2797 	 * If already allocated, free the old allocation before re-allocating.
2798 	 */
2799 	if (mpt->m_dma_req_sense_hdl) {
2800 		rmfreemap(mpt->m_erqsense_map);
2801 		mptsas_dma_addr_destroy(&mpt->m_dma_req_sense_hdl,
2802 		    &mpt->m_acc_req_sense_hdl);
2803 	}
2804 
2805 	/*
2806 	 * The size of the request sense pool is:
2807 	 *   (Number of Request Frames - 2 ) * Request Sense Size +
2808 	 *   extra memory for extended sense requests.
2809 	 */
2810 	mem_size = ((mpt->m_max_requests - 2) * mpt->m_req_sense_size) +
2811 	    mptsas_extreq_sense_bufsize;
2812 
2813 	/*
2814 	 * set the DMA attributes.  ARQ buffers must be
2815 	 * aligned on a 16-byte boundary.
2816 	 */
2817 	sense_dma_attrs = mpt->m_msg_dma_attr;
2818 	sense_dma_attrs.dma_attr_align = 16;
2819 	sense_dma_attrs.dma_attr_sgllen = 1;
2820 
2821 	/*
2822 	 * allocate the request sense buffer pool.
2823 	 */
2824 	if (mptsas_dma_addr_create(mpt, sense_dma_attrs,
2825 	    &mpt->m_dma_req_sense_hdl, &mpt->m_acc_req_sense_hdl, &memp,
2826 	    mem_size, &cookie) == FALSE) {
2827 		return (DDI_FAILURE);
2828 	}
2829 
2830 	/*
2831 	 * Store the request sense base memory address.  This chip uses this
2832 	 * address to dma the request sense data.  The second
2833 	 * address is the address mpt uses to access the data.
2834 	 * The third is the base for the extended rqsense buffers.
2835 	 */
2836 	mpt->m_req_sense_dma_addr = cookie.dmac_laddress;
2837 	mpt->m_req_sense = memp;
2838 	memp += (mpt->m_max_requests - 2) * mpt->m_req_sense_size;
2839 	mpt->m_extreq_sense = memp;
2840 
2841 	/*
2842 	 * The extra memory is divided up into multiples of the base
2843 	 * buffer size in order to allocate via rmalloc().
2844 	 * Note that the rmallocmap cannot start at zero!
2845 	 */
2846 	num_extrqsense_bufs = mptsas_extreq_sense_bufsize /
2847 	    mpt->m_req_sense_size;
2848 	mpt->m_erqsense_map = rmallocmap_wait(num_extrqsense_bufs);
2849 	rmfree(mpt->m_erqsense_map, num_extrqsense_bufs, 1);
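	/*
	 * rmallocmap_wait() returns an empty map; the rmfree() above seeds
	 * it, making indices 1 through num_extrqsense_bufs available to
	 * later rmalloc() calls.
	 */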
2850 
2851 	/*
2852 	 * Clear the pool.
2853 	 */
2854 	bzero(mpt->m_req_sense, mem_size);
2855 
2856 	return (DDI_SUCCESS);
2857 }
2858 
2859 static int
2860 mptsas_alloc_reply_frames(mptsas_t *mpt)
2861 {
2862 	ddi_dma_attr_t		frame_dma_attrs;
2863 	caddr_t			memp;
2864 	ddi_dma_cookie_t	cookie;
2865 	size_t			mem_size;
2866 
2867 	/*
2868 	 * If already allocated, free the old allocation before re-allocating.
2869 	 */
2870 	if (mpt->m_dma_reply_frame_hdl) {
2871 		mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
2872 		    &mpt->m_acc_reply_frame_hdl);
2873 	}
2874 
2875 	/*
2876 	 * The size of the reply frame pool is:
2877 	 *   Number of Reply Frames * Reply Frame Size
2878 	 */
2879 	mem_size = mpt->m_max_replies * mpt->m_reply_frame_size;
2880 
2881 	/*
2882 	 * set the DMA attributes.   System Reply Message Frames must be
2883 	 * aligned on a 4-byte boundary.  This is the default.
2884 	 */
2885 	frame_dma_attrs = mpt->m_msg_dma_attr;
2886 	frame_dma_attrs.dma_attr_sgllen = 1;
2887 
2888 	/*
2889 	 * allocate the reply frame pool
2890 	 */
2891 	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2892 	    &mpt->m_dma_reply_frame_hdl, &mpt->m_acc_reply_frame_hdl, &memp,
2893 	    mem_size, &cookie) == FALSE) {
2894 		return (DDI_FAILURE);
2895 	}
2896 
2897 	/*
2898 	 * Store the reply frame memory address.  This chip uses this
2899 	 * address to dma to and from the driver's frame.  The second
2900 	 * address is the address mpt uses to process the frame.
2901 	 */
2902 	mpt->m_reply_frame_dma_addr = cookie.dmac_laddress;
2903 	mpt->m_reply_frame = memp;
2904 
2905 	/*
2906 	 * Clear the reply frame pool.
2907 	 */
2908 	bzero(mpt->m_reply_frame, mem_size);
2909 
2910 	return (DDI_SUCCESS);
2911 }
2912 
2913 static int
2914 mptsas_alloc_free_queue(mptsas_t *mpt)
2915 {
2916 	ddi_dma_attr_t		frame_dma_attrs;
2917 	caddr_t			memp;
2918 	ddi_dma_cookie_t	cookie;
2919 	size_t			mem_size;
2920 
2921 	/*
2922 	 * If already allocated, free the old allocation before re-allocating.
2923 	 */
2924 	if (mpt->m_dma_free_queue_hdl) {
2925 		mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
2926 		    &mpt->m_acc_free_queue_hdl);
2927 	}
2928 
2929 	/*
2930 	 * The reply free queue size is:
2931 	 *   Reply Free Queue Depth * 4
2932 	 * The "4" is the size of one 32 bit address (low part of 64-bit
2933 	 *   address)
2934 	 */
2935 	mem_size = mpt->m_free_queue_depth * 4;
2936 
2937 	/*
2938 	 * set the DMA attributes.  The Reply Free Queue must be aligned on a
2939 	 * 16-byte boundary.
2940 	 */
2941 	frame_dma_attrs = mpt->m_msg_dma_attr;
2942 	frame_dma_attrs.dma_attr_align = 16;
2943 	frame_dma_attrs.dma_attr_sgllen = 1;
2944 
2945 	/*
2946 	 * allocate the reply free queue
2947 	 */
2948 	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
2949 	    &mpt->m_dma_free_queue_hdl, &mpt->m_acc_free_queue_hdl, &memp,
2950 	    mem_size, &cookie) == FALSE) {
2951 		return (DDI_FAILURE);
2952 	}
2953 
2954 	/*
2955 	 * Store the reply free queue memory address.  This chip uses this
2956 	 * address to read from the reply free queue.  The second address
2957 	 * is the address mpt uses to manage the queue.
2958 	 */
2959 	mpt->m_free_queue_dma_addr = cookie.dmac_laddress;
2960 	mpt->m_free_queue = memp;
2961 
2962 	/*
2963 	 * Clear the reply free queue memory.
2964 	 */
2965 	bzero(mpt->m_free_queue, mem_size);
2966 
2967 	return (DDI_SUCCESS);
2968 }
2969 
2970 static int
2971 mptsas_alloc_post_queue(mptsas_t *mpt)
2972 {
2973 	ddi_dma_attr_t		frame_dma_attrs;
2974 	caddr_t			memp;
2975 	ddi_dma_cookie_t	cookie;
2976 	size_t			mem_size;
2977 
2978 	/*
2979 	 * If already allocated, free the old allocation before re-allocating.
2980 	 */
2981 	if (mpt->m_dma_post_queue_hdl) {
2982 		mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
2983 		    &mpt->m_acc_post_queue_hdl);
2984 	}
2985 
2986 	/*
2987 	 * The reply descriptor post queue size is:
2988 	 *   Reply Descriptor Post Queue Depth * 8
2989 	 * The "8" is the size of each descriptor (8 bytes or 64 bits).
2990 	 */
2991 	mem_size = mpt->m_post_queue_depth * 8;
2992 
2993 	/*
2994 	 * set the DMA attributes.  The Reply Descriptor Post Queue must be
2995 	 * aligned on a 16-byte boundary.
2996 	 */
2997 	frame_dma_attrs = mpt->m_msg_dma_attr;
2998 	frame_dma_attrs.dma_attr_align = 16;
2999 	frame_dma_attrs.dma_attr_sgllen = 1;
3000 
3001 	/*
3002 	 * allocate the reply post queue
3003 	 */
3004 	if (mptsas_dma_addr_create(mpt, frame_dma_attrs,
3005 	    &mpt->m_dma_post_queue_hdl, &mpt->m_acc_post_queue_hdl, &memp,
3006 	    mem_size, &cookie) == FALSE) {
3007 		return (DDI_FAILURE);
3008 	}
3009 
3010 	/*
3011 	 * Store the reply descriptor post queue memory address.  This chip
3012 	 * uses this address to write to the reply descriptor post queue.  The
3013 	 * second address is the address mpt uses to manage the queue.
3014 	 */
3015 	mpt->m_post_queue_dma_addr = cookie.dmac_laddress;
3016 	mpt->m_post_queue = memp;
3017 
3018 	/*
3019 	 * Clear the reply post queue memory.
3020 	 */
3021 	bzero(mpt->m_post_queue, mem_size);
3022 
3023 	return (DDI_SUCCESS);
3024 }
3025 
3026 static void
3027 mptsas_alloc_reply_args(mptsas_t *mpt)
3028 {
3029 	if (mpt->m_replyh_args == NULL) {
3030 		mpt->m_replyh_args = kmem_zalloc(sizeof (m_replyh_arg_t) *
3031 		    mpt->m_max_replies, KM_SLEEP);
3032 	}
3033 }
3034 
3035 static int
3036 mptsas_alloc_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
3037 {
3038 	mptsas_cache_frames_t	*frames = NULL;
3039 	if (cmd->cmd_extra_frames == NULL) {
3040 		frames = kmem_cache_alloc(mpt->m_cache_frames, KM_NOSLEEP);
3041 		if (frames == NULL) {
3042 			return (DDI_FAILURE);
3043 		}
3044 		cmd->cmd_extra_frames = frames;
3045 	}
3046 	return (DDI_SUCCESS);
3047 }
3048 
3049 static void
3050 mptsas_free_extra_sgl_frame(mptsas_t *mpt, mptsas_cmd_t *cmd)
3051 {
3052 	if (cmd->cmd_extra_frames) {
3053 		kmem_cache_free(mpt->m_cache_frames,
3054 		    (void *)cmd->cmd_extra_frames);
3055 		cmd->cmd_extra_frames = NULL;
3056 	}
3057 }
3058 
3059 static void
3060 mptsas_cfg_fini(mptsas_t *mpt)
3061 {
3062 	NDBG0(("mptsas_cfg_fini"));
3063 	ddi_regs_map_free(&mpt->m_datap);
3064 }
3065 
3066 static void
3067 mptsas_hba_fini(mptsas_t *mpt)
3068 {
3069 	NDBG0(("mptsas_hba_fini"));
3070 
3071 	/*
3072 	 * Free up any allocated memory
3073 	 */
3074 	if (mpt->m_dma_req_frame_hdl) {
3075 		mptsas_dma_addr_destroy(&mpt->m_dma_req_frame_hdl,
3076 		    &mpt->m_acc_req_frame_hdl);
3077 	}
3078 
3079 	if (mpt->m_dma_req_sense_hdl) {
3080 		rmfreemap(mpt->m_erqsense_map);
3081 		mptsas_dma_addr_destroy(&mpt->m_dma_req_sense_hdl,
3082 		    &mpt->m_acc_req_sense_hdl);
3083 	}
3084 
3085 	if (mpt->m_dma_reply_frame_hdl) {
3086 		mptsas_dma_addr_destroy(&mpt->m_dma_reply_frame_hdl,
3087 		    &mpt->m_acc_reply_frame_hdl);
3088 	}
3089 
3090 	if (mpt->m_dma_free_queue_hdl) {
3091 		mptsas_dma_addr_destroy(&mpt->m_dma_free_queue_hdl,
3092 		    &mpt->m_acc_free_queue_hdl);
3093 	}
3094 
3095 	if (mpt->m_dma_post_queue_hdl) {
3096 		mptsas_dma_addr_destroy(&mpt->m_dma_post_queue_hdl,
3097 		    &mpt->m_acc_post_queue_hdl);
3098 	}
3099 
3100 	if (mpt->m_replyh_args != NULL) {
3101 		kmem_free(mpt->m_replyh_args, sizeof (m_replyh_arg_t)
3102 		    * mpt->m_max_replies);
3103 	}
3104 }
3105 
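/*
 * Build the unit-address portion of a child's name: "pPHY,LUN" for a
 * direct-attached SATA device (identified by the "sata-phy" property),
 * otherwise "TARGETPORT,LUN" from the target-port property, e.g. a string
 * such as "w5000c500a1b2c3d4,0" (illustrative value only).
 */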
3106 static int
3107 mptsas_name_child(dev_info_t *lun_dip, char *name, int len)
3108 {
3109 	int		lun = 0;
3110 	char		*sas_wwn = NULL;
3111 	int		phynum = -1;
3112 	int		reallen = 0;
3113 
3114 	/* Get the target num */
3115 	lun = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip, DDI_PROP_DONTPASS,
3116 	    LUN_PROP, 0);
3117 
3118 	if ((phynum = ddi_prop_get_int(DDI_DEV_T_ANY, lun_dip,
3119 	    DDI_PROP_DONTPASS, "sata-phy", -1)) != -1) {
3120 		/*
3121 		 * Stick in the address of the form "pPHY,LUN"
3122 		 */
3123 		reallen = snprintf(name, len, "p%x,%x", phynum, lun);
3124 	} else if (ddi_prop_lookup_string(DDI_DEV_T_ANY, lun_dip,
3125 	    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &sas_wwn)
3126 	    == DDI_PROP_SUCCESS) {
3127 		/*
3128 		 * Stick in the address of the form "wWWN,LUN"
3129 		 */
3130 		reallen = snprintf(name, len, "%s,%x", sas_wwn, lun);
3131 		ddi_prop_free(sas_wwn);
3132 	} else {
3133 		return (DDI_FAILURE);
3134 	}
3135 
3136 	ASSERT(reallen < len);
3137 	if (reallen >= len) {
3138 		mptsas_log(0, CE_WARN, "!mptsas_get_name: name parameter "
3139 		    "length too small, it needs to be %d bytes", reallen + 1);
3140 	}
3141 	return (DDI_SUCCESS);
3142 }
3143 
3144 /*
3145  * tran_tgt_init(9E) - target device instance initialization
3146  */
3147 static int
3148 mptsas_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
3149     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
3150 {
3151 #ifndef __lock_lint
3152 	_NOTE(ARGUNUSED(hba_tran))
3153 #endif
3154 
3155 	/*
3156 	 * At this point, the scsi_device structure already exists
3157 	 * and has been initialized.
3158 	 *
3159 	 * Use this function to allocate target-private data structures,
3160 	 * if needed by this HBA.  Add revised flow-control and queue
3161 	 * properties for child here, if desired and if you can tell they
3162 	 * support tagged queueing by now.
3163 	 */
3164 	mptsas_t		*mpt;
3165 	int			lun = sd->sd_address.a_lun;
3166 	mdi_pathinfo_t		*pip = NULL;
3167 	mptsas_tgt_private_t	*tgt_private = NULL;
3168 	mptsas_target_t		*ptgt = NULL;
3169 	char			*psas_wwn = NULL;
3170 	mptsas_phymask_t	phymask = 0;
3171 	uint64_t		sas_wwn = 0;
3172 	mptsas_target_addr_t	addr;
3173 	mpt = SDEV2MPT(sd);
3174 
3175 	ASSERT(scsi_hba_iport_unit_address(hba_dip) != 0);
3176 
3177 	NDBG0(("mptsas_scsi_tgt_init: hbadip=0x%p tgtdip=0x%p lun=%d",
3178 	    (void *)hba_dip, (void *)tgt_dip, lun));
3179 
3180 	if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
3181 		(void) ndi_merge_node(tgt_dip, mptsas_name_child);
3182 		ddi_set_name_addr(tgt_dip, NULL);
3183 		return (DDI_FAILURE);
3184 	}
3185 	/*
3186 	 * A phymask of 0 means this is the virtual port for RAID
3187 	 */
3188 	phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, hba_dip, 0,
3189 	    "phymask", 0);
3190 	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
3191 		if ((pip = (void *)(sd->sd_private)) == NULL) {
3192 			/*
3193 			 * Very bad news if this occurs. Somehow scsi_vhci has
3194 			 * lost the pathinfo node for this target.
3195 			 */
3196 			return (DDI_NOT_WELL_FORMED);
3197 		}
3198 
3199 		if (mdi_prop_lookup_int(pip, LUN_PROP, &lun) !=
3200 		    DDI_PROP_SUCCESS) {
3201 			mptsas_log(mpt, CE_WARN, "Get lun property failed\n");
3202 			return (DDI_FAILURE);
3203 		}
3204 
3205 		if (mdi_prop_lookup_string(pip, SCSI_ADDR_PROP_TARGET_PORT,
3206 		    &psas_wwn) == MDI_SUCCESS) {
3207 			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
3208 				sas_wwn = 0;
3209 			}
3210 			(void) mdi_prop_free(psas_wwn);
3211 		}
3212 	} else {
3213 		lun = ddi_prop_get_int(DDI_DEV_T_ANY, tgt_dip,
3214 		    DDI_PROP_DONTPASS, LUN_PROP, 0);
3215 		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, tgt_dip,
3216 		    DDI_PROP_DONTPASS, SCSI_ADDR_PROP_TARGET_PORT, &psas_wwn) ==
3217 		    DDI_PROP_SUCCESS) {
3218 			if (scsi_wwnstr_to_wwn(psas_wwn, &sas_wwn)) {
3219 				sas_wwn = 0;
3220 			}
3221 			ddi_prop_free(psas_wwn);
3222 		} else {
3223 			sas_wwn = 0;
3224 		}
3225 	}
3226 
3227 	ASSERT((sas_wwn != 0) || (phymask != 0));
3228 	addr.mta_wwn = sas_wwn;
3229 	addr.mta_phymask = phymask;
3230 	mutex_enter(&mpt->m_mutex);
3231 	ptgt = refhash_lookup(mpt->m_targets, &addr);
3232 	mutex_exit(&mpt->m_mutex);
3233 	if (ptgt == NULL) {
3234 		mptsas_log(mpt, CE_WARN, "!tgt_init: target doesn't exist or "
3235 		    "gone already! phymask:%x, saswwn %"PRIx64, phymask,
3236 		    sas_wwn);
3237 		return (DDI_FAILURE);
3238 	}
3239 	if (hba_tran->tran_tgt_private == NULL) {
3240 		tgt_private = kmem_zalloc(sizeof (mptsas_tgt_private_t),
3241 		    KM_SLEEP);
3242 		tgt_private->t_lun = lun;
3243 		tgt_private->t_private = ptgt;
3244 		hba_tran->tran_tgt_private = tgt_private;
3245 	}
3246 
3247 	if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
3248 		return (DDI_SUCCESS);
3249 	}
3250 	mutex_enter(&mpt->m_mutex);
3251 
3252 	if (ptgt->m_deviceinfo &
3253 	    (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
3254 	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
3255 		uchar_t *inq89 = NULL;
3256 		int inq89_len = 0x238;
3257 		int reallen = 0;
3258 		int rval = 0;
3259 		struct sata_id *sid = NULL;
3260 		char model[SATA_ID_MODEL_LEN + 1];
3261 		char fw[SATA_ID_FW_LEN + 1];
3262 		char *vid, *pid;
3263 
3264 		mutex_exit(&mpt->m_mutex);
3265 		/*
3266 		 * According to SCSI/ATA Translation-2 (SAT-2) revision 01a,
3267 		 * chapter 12.4.2, VPD page 89h includes 512 bytes of ATA
3268 		 * IDENTIFY DEVICE data or ATA IDENTIFY PACKET DEVICE data.
3269 		 */
3270 		inq89 = kmem_zalloc(inq89_len, KM_SLEEP);
3271 		rval = mptsas_inquiry(mpt, ptgt, 0, 0x89,
3272 		    inq89, inq89_len, &reallen, 1);
3273 
3274 		if (rval != 0) {
3275 			if (inq89 != NULL) {
3276 				kmem_free(inq89, inq89_len);
3277 			}
3278 
3279 			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
3280 			    "0x89 for SATA target:%x failed!", ptgt->m_devhdl);
3281 			return (DDI_SUCCESS);
3282 		}
3283 		sid = (void *)(&inq89[60]);
3284 
3285 		swab(sid->ai_model, model, SATA_ID_MODEL_LEN);
3286 		swab(sid->ai_fw, fw, SATA_ID_FW_LEN);
3287 
3288 		model[SATA_ID_MODEL_LEN] = 0;
3289 		fw[SATA_ID_FW_LEN] = 0;
3290 
3291 		sata_split_model(model, &vid, &pid);
3292 
3293 		/*
3294 		 * override SCSA "inquiry-*" properties
3295 		 */
3296 		if (vid)
3297 			(void) scsi_device_prop_update_inqstring(sd,
3298 			    INQUIRY_VENDOR_ID, vid, strlen(vid));
3299 		if (pid)
3300 			(void) scsi_device_prop_update_inqstring(sd,
3301 			    INQUIRY_PRODUCT_ID, pid, strlen(pid));
3302 		(void) scsi_device_prop_update_inqstring(sd,
3303 		    INQUIRY_REVISION_ID, fw, strlen(fw));
3304 
3305 		if (inq89 != NULL) {
3306 			kmem_free(inq89, inq89_len);
3307 		}
3308 	} else {
3309 		mutex_exit(&mpt->m_mutex);
3310 	}
3311 
3312 	return (DDI_SUCCESS);
3313 }
3314 /*
3315  * tran_tgt_free(9E) - target device instance deallocation
3316  */
3317 static void
3318 mptsas_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
3319     scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
3320 {
3321 #ifndef __lock_lint
3322 	_NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran, sd))
3323 #endif
3324 
3325 	mptsas_tgt_private_t	*tgt_private = hba_tran->tran_tgt_private;
3326 
3327 	if (tgt_private != NULL) {
3328 		kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
3329 		hba_tran->tran_tgt_private = NULL;
3330 	}
3331 }
3332 
3333 /*
3334  * scsi_pkt handling
3335  *
3336  * Visible to the external world via the transport structure.
3337  */
3338 
3339 /*
3340  * Notes:
3341  *	- transport the command to the addressed SCSI target/lun device
3342  *	- normal operation is to schedule the command to be transported,
3343  *	  and return TRAN_ACCEPT if this is successful.
3344  *	- if NO_INTR, tran_start must poll device for command completion
3345  */
3346 static int
3347 mptsas_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
3348 {
3349 #ifndef __lock_lint
3350 	_NOTE(ARGUNUSED(ap))
3351 #endif
3352 	mptsas_t	*mpt = PKT2MPT(pkt);
3353 	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
3354 	int		rval;
3355 	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;
3356 
3357 	NDBG1(("mptsas_scsi_start: pkt=0x%p", (void *)pkt));
3358 	ASSERT(ptgt);
3359 	if (ptgt == NULL)
3360 		return (TRAN_FATAL_ERROR);
3361 
3362 	/*
3363 	 * prepare the pkt before taking mutex.
3364 	 */
3365 	rval = mptsas_prepare_pkt(cmd);
3366 	if (rval != TRAN_ACCEPT) {
3367 		return (rval);
3368 	}
3369 
3370 	/*
3371 	 * Send the command to target/lun, however your HBA requires it.
3372 	 * If busy, return TRAN_BUSY; if there's some other formatting error
3373 	 * in the packet, return TRAN_BADPKT; otherwise, fall through to the
3374 	 * return of TRAN_ACCEPT.
3375 	 *
3376 	 * Remember that access to shared resources, including the mptsas_t
3377 	 * data structure and the HBA hardware registers, must be protected
3378 	 * with mutexes, here and everywhere.
3379 	 *
3380 	 * Also remember that at interrupt time, you'll get an argument
3381 	 * to the interrupt handler which is a pointer to your mptsas_t
3382 	 * structure; you'll have to remember which commands are outstanding
3383 	 * and which scsi_pkt is the currently-running command so the
3384 	 * interrupt handler can refer to the pkt to set completion
3385 	 * status, call the target driver back through pkt_comp, etc.
3386 	 *
3387 	 * If the instance lock is held by another thread, don't spin waiting
3388 	 * for it.  Instead, queue the cmd; the next time the instance lock
3389 	 * is not held, all queued cmds are accepted.  An extra tx_waitq is
3390 	 * introduced to hold the queue.
3391 	 *
3392 	 * Polled cmds are not queued; they are accepted as usual.
3393 	 *
3394 	 * Under the tx_waitq mutex, record whether a thread is draining
3395 	 * the tx_waitq.  An I/O requesting thread that finds the instance
3396 	 * mutex contended appends to the tx_waitq and, while holding the
3397 	 * tx_waitq mutex, sets the draining flag if it is not already set,
3398 	 * then proceeds to spin for the instance mutex.  This scheme ensures
3399 	 * that the last cmd in a burst is processed.
3400 	 *
3401 	 * This feature is enabled only when the helper threads are enabled,
3402 	 * which indicates that the load is heavy.
3403 	 *
3404 	 * The per-instance mutex m_tx_waitq_mutex protects m_tx_waitqtail,
3405 	 * m_tx_waitq and m_tx_draining.
3406 	 */
3407 
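	/*
	 * Dispatch: with helper threads running, take the instance mutex if
	 * it is free (or if the command is polled) and accept directly;
	 * otherwise append to the tx_waitq, and the thread that set
	 * m_tx_draining drains the queue.  Without helper threads, always
	 * take the instance mutex and accept the command directly.
	 */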
3408 	if (mpt->m_doneq_thread_n) {
3409 		if (mutex_tryenter(&mpt->m_mutex) != 0) {
3410 			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
3411 			mutex_exit(&mpt->m_mutex);
3412 		} else if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
3413 			mutex_enter(&mpt->m_mutex);
3414 			rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
3415 			mutex_exit(&mpt->m_mutex);
3416 		} else {
3417 			mutex_enter(&mpt->m_tx_waitq_mutex);
3418 			/*
3419 			 * ptgt->m_dr_flag is protected by m_mutex or
3420 			 * m_tx_waitq_mutex. In this case, m_tx_waitq_mutex
3421 			 * is acquired.
3422 			 */
3423 			if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3424 				if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3425 					/*
3426 					 * The command should be allowed to
3427 					 * retry by returning TRAN_BUSY to
3428 					 * stall the I/Os which come from
3429 					 * scsi_vhci since the device/path is
3430 					 * in an unstable state now.
3431 					 */
3432 					mutex_exit(&mpt->m_tx_waitq_mutex);
3433 					return (TRAN_BUSY);
3434 				} else {
3435 					/*
3436 					 * The device is offline, just fail the
3437 					 * command by returning
3438 					 * TRAN_FATAL_ERROR.
3439 					 */
3440 					mutex_exit(&mpt->m_tx_waitq_mutex);
3441 					return (TRAN_FATAL_ERROR);
3442 				}
3443 			}
3444 			if (mpt->m_tx_draining) {
3445 				cmd->cmd_flags |= CFLAG_TXQ;
3446 				*mpt->m_tx_waitqtail = cmd;
3447 				mpt->m_tx_waitqtail = &cmd->cmd_linkp;
3448 				mutex_exit(&mpt->m_tx_waitq_mutex);
3449 			} else { /* drain the queue */
3450 				mpt->m_tx_draining = 1;
3451 				mutex_exit(&mpt->m_tx_waitq_mutex);
3452 				mutex_enter(&mpt->m_mutex);
3453 				rval = mptsas_accept_txwq_and_pkt(mpt, cmd);
3454 				mutex_exit(&mpt->m_mutex);
3455 			}
3456 		}
3457 	} else {
3458 		mutex_enter(&mpt->m_mutex);
3459 		/*
3460 		 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex
3461 		 * in this case, m_mutex is acquired.
3462 		 */
3463 		if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3464 			if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3465 				/*
3466 				 * Commands should be allowed to retry by
3467 				 * returning TRAN_BUSY to stall the I/Os
3468 				 * which come from scsi_vhci since the
3469 				 * device/path is in an unstable state now.
3470 				 */
3471 				mutex_exit(&mpt->m_mutex);
3472 				return (TRAN_BUSY);
3473 			} else {
3474 				/*
3475 				 * The device is offline, just fail the
3476 				 * command by returning TRAN_FATAL_ERROR.
3477 				 */
3478 				mutex_exit(&mpt->m_mutex);
3479 				return (TRAN_FATAL_ERROR);
3480 			}
3481 		}
3482 		rval = mptsas_accept_pkt(mpt, cmd);
3483 		mutex_exit(&mpt->m_mutex);
3484 	}
3485 
3486 	return (rval);
3487 }
3488 
3489 /*
3490  * Accept all the queued cmds (if any) before accepting the current one.
3491  */
3492 static int
3493 mptsas_accept_txwq_and_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
3494 {
3495 	int rval;
3496 	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;
3497 
3498 	ASSERT(mutex_owned(&mpt->m_mutex));
3499 	/*
3500 	 * The call to mptsas_accept_tx_waitq() must always be performed
3501 	 * because that is where mpt->m_tx_draining is cleared.
3502 	 */
3503 	mutex_enter(&mpt->m_tx_waitq_mutex);
3504 	mptsas_accept_tx_waitq(mpt);
3505 	mutex_exit(&mpt->m_tx_waitq_mutex);
3506 	/*
3507 	 * ptgt->m_dr_flag is protected by m_mutex or m_tx_waitq_mutex.
3508 	 * In this case, m_mutex is acquired.
3509 	 */
3510 	if (ptgt->m_dr_flag == MPTSAS_DR_INTRANSITION) {
3511 		if (cmd->cmd_pkt_flags & FLAG_NOQUEUE) {
3512 			/*
3513 			 * The command should be allowed to retry by returning
3514 			 * TRAN_BUSY to stall the I/Os which come from
3515 			 * scsi_vhci since the device/path is in an unstable
3516 			 * state now.
3517 			 */
3518 			return (TRAN_BUSY);
3519 		} else {
3520 			/*
3521 			 * The device is offline, just fail the command by
3522 			 * returning TRAN_FATAL_ERROR.
3523 			 */
3524 			return (TRAN_FATAL_ERROR);
3525 		}
3526 	}
3527 	rval = mptsas_accept_pkt(mpt, cmd);
3528 
3529 	return (rval);
3530 }
3531 
3532 static int
3533 mptsas_accept_pkt(mptsas_t *mpt, mptsas_cmd_t *cmd)
3534 {
3535 	int		rval = TRAN_ACCEPT;
3536 	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;
3537 
3538 	NDBG1(("mptsas_accept_pkt: cmd=0x%p", (void *)cmd));
3539 
3540 	ASSERT(mutex_owned(&mpt->m_mutex));
3541 
3542 	if ((cmd->cmd_flags & CFLAG_PREPARED) == 0) {
3543 		rval = mptsas_prepare_pkt(cmd);
3544 		if (rval != TRAN_ACCEPT) {
3545 			cmd->cmd_flags &= ~CFLAG_TRANFLAG;
3546 			return (rval);
3547 		}
3548 	}
3549 
3550 	/*
3551 	 * reset the throttle if we were draining
3552 	 */
3553 	if ((ptgt->m_t_ncmds == 0) &&
3554 	    (ptgt->m_t_throttle == DRAIN_THROTTLE)) {
3555 		NDBG23(("reset throttle"));
3556 		ASSERT(ptgt->m_reset_delay == 0);
3557 		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
3558 	}
3559 
3560 	/*
3561 	 * If HBA is being reset, the DevHandles are being re-initialized,
3562 	 * which means that they could be invalid even if the target is still
3563 	 * attached.  Check if being reset and if DevHandle is being
3564 	 * re-initialized.  If this is the case, return BUSY so the I/O can be
3565 	 * retried later.
3566 	 */
3567 	if ((ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) && mpt->m_in_reset) {
3568 		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
3569 		if (cmd->cmd_flags & CFLAG_TXQ) {
3570 			mptsas_doneq_add(mpt, cmd);
3571 			mptsas_doneq_empty(mpt);
3572 			return (rval);
3573 		} else {
3574 			return (TRAN_BUSY);
3575 		}
3576 	}
3577 
3578 	/*
3579 	 * If the device handle has already been invalidated, just
3580 	 * fail the command. In theory, a scsi_vhci client should never
3581 	 * send down a command with an invalid devhdl, since the devhdl
3582 	 * is only invalidated after the path goes offline and the target
3583 	 * driver is not supposed to select an offlined path.
3584 	 */
3585 	if (ptgt->m_devhdl == MPTSAS_INVALID_DEVHDL) {
3586 		NDBG3(("rejecting command, possibly due to an invalid devhdl "
3587 		    "request."));
3588 		mptsas_set_pkt_reason(mpt, cmd, CMD_DEV_GONE, STAT_TERMINATED);
3589 		if (cmd->cmd_flags & CFLAG_TXQ) {
3590 			mptsas_doneq_add(mpt, cmd);
3591 			mptsas_doneq_empty(mpt);
3592 			return (rval);
3593 		} else {
3594 			return (TRAN_FATAL_ERROR);
3595 		}
3596 	}
3597 	/*
3598 	 * The first case is the normal case.  mpt gets a command from the
3599 	 * target driver and starts it.
3600 	 * Since SMID 0 is reserved and the TM slot is reserved, the actual
3601 	 * maximum number of commands is m_max_requests - 2.
3602 	 */
3603 	if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
3604 	    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
3605 	    (ptgt->m_t_ncmds < ptgt->m_t_throttle) &&
3606 	    (ptgt->m_reset_delay == 0) &&
3607 	    (ptgt->m_t_nwait == 0) &&
3608 	    ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0)) {
3609 		if (mptsas_save_cmd(mpt, cmd) == TRUE) {
3610 			(void) mptsas_start_cmd(mpt, cmd);
3611 		} else {
3612 			mptsas_waitq_add(mpt, cmd);
3613 		}
3614 	} else {
3615 		/*
3616 		 * Add this pkt to the work queue
3617 		 */
3618 		mptsas_waitq_add(mpt, cmd);
3619 
3620 		if (cmd->cmd_pkt_flags & FLAG_NOINTR) {
3621 			(void) mptsas_poll(mpt, cmd, MPTSAS_POLL_TIME);
3622 
3623 			/*
3624 			 * Only flush the doneq if this is not a TM
3625 			 * cmd.  For TM cmds the flushing of the
3626 			 * doneq will be done in those routines.
3627 			 */
3628 			if ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
3629 				mptsas_doneq_empty(mpt);
3630 			}
3631 		}
3632 	}
3633 	return (rval);
3634 }
3635 
3636 int
3637 mptsas_save_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
3638 {
3639 	mptsas_slots_t *slots = mpt->m_active;
3640 	uint_t slot, start_rotor;
3641 	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
3642 
3643 	ASSERT(MUTEX_HELD(&mpt->m_mutex));
3644 
3645 	/*
3646 	 * Account for reserved TM request slot and reserved SMID of 0.
3647 	 */
3648 	ASSERT(slots->m_n_normal == (mpt->m_max_requests - 2));
3649 
3650 	/*
3651 	 * Find the next available slot, beginning at m_rotor.  If no slot is
3652 	 * available, we'll return FALSE to indicate that.  This mechanism
3653 	 * considers only the normal slots, not the reserved slot 0 nor the
3654 	 * task management slot m_n_normal + 1.  The rotor is left to point to
3655 	 * the normal slot after the one we select, unless we select the last
3656 	 * normal slot in which case it returns to slot 1.
3657 	 */
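	/*
	 * Illustrative example (hypothetical numbers): with m_n_normal == 8
	 * and m_rotor == 7, the scan visits slots 7, 8, 1, 2, ..., 6 in that
	 * order and stops at the first free slot; if every normal slot is
	 * occupied, the loop wraps back to the starting rotor and FALSE is
	 * returned below.
	 */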
3658 	start_rotor = slots->m_rotor;
3659 	do {
3660 		slot = slots->m_rotor++;
3661 		if (slots->m_rotor > slots->m_n_normal)
3662 			slots->m_rotor = 1;
3663 
3664 		if (slots->m_rotor == start_rotor)
3665 			break;
3666 	} while (slots->m_slot[slot] != NULL);
3667 
3668 	if (slots->m_slot[slot] != NULL)
3669 		return (FALSE);
3670 
3671 	ASSERT(slot != 0 && slot <= slots->m_n_normal);
3672 
3673 	cmd->cmd_slot = slot;
3674 	slots->m_slot[slot] = cmd;
3675 	mpt->m_ncmds++;
3676 
3677 	/*
3678 	 * Only increment the per-target ncmds if this is a command that
3679 	 * has a target associated with it (i.e. it is not an event
3680 	 * acknowledgment).
3681 	 */
3682 	if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
3683 		/*
3684 		 * Expiration time is set in mptsas_start_cmd
3685 		 */
3686 		ptgt->m_t_ncmds++;
3687 		cmd->cmd_active_expiration = 0;
3688 	} else {
3689 		/*
3690 		 * Initialize expiration time for passthrough commands.
3691 		 */
3692 		cmd->cmd_active_expiration = gethrtime() +
3693 		    (hrtime_t)cmd->cmd_pkt->pkt_time * NANOSEC;
3694 	}
3695 	return (TRUE);
3696 }
3697 
3698 /*
3699  * prepare the pkt:
3700  * the pkt may have been resubmitted or just reused so
3701  * initialize some fields and do some checks.
3702  */
3703 static int
3704 mptsas_prepare_pkt(mptsas_cmd_t *cmd)
3705 {
3706 	struct scsi_pkt	*pkt = CMD2PKT(cmd);
3707 
3708 	NDBG1(("mptsas_prepare_pkt: cmd=0x%p", (void *)cmd));
3709 
3710 	/*
3711 	 * Reinitialize some fields that need it; the packet may
3712 	 * have been resubmitted
3713 	 */
3714 	pkt->pkt_reason = CMD_CMPLT;
3715 	pkt->pkt_state = 0;
3716 	pkt->pkt_statistics = 0;
3717 	pkt->pkt_resid = 0;
3718 	cmd->cmd_age = 0;
3719 	cmd->cmd_pkt_flags = pkt->pkt_flags;
3720 
3721 	/*
3722 	 * zero status byte.
3723 	 */
3724 	*(pkt->pkt_scbp) = 0;
3725 
3726 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
3727 		pkt->pkt_resid = cmd->cmd_dmacount;
3728 
3729 		/*
3730 		 * consistent packets need to be sync'ed first
3731 		 * (only for data going out)
3732 		 */
3733 		if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
3734 		    (cmd->cmd_flags & CFLAG_DMASEND)) {
3735 			(void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
3736 			    DDI_DMA_SYNC_FORDEV);
3737 		}
3738 	}
3739 
3740 	cmd->cmd_flags =
3741 	    (cmd->cmd_flags & ~(CFLAG_TRANFLAG)) |
3742 	    CFLAG_PREPARED | CFLAG_IN_TRANSPORT;
3743 
3744 	return (TRAN_ACCEPT);
3745 }
3746 
3747 /*
3748  * tran_init_pkt(9E) - allocate scsi_pkt(9S) for command
3749  *
3750  * One of three possibilities:
3751  *	- allocate scsi_pkt
3752  *	- allocate scsi_pkt and DMA resources
3753  *	- allocate DMA resources to an already-allocated pkt
3754  */
3755 static struct scsi_pkt *
3756 mptsas_scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
3757     struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
3758     int (*callback)(), caddr_t arg)
3759 {
3760 	mptsas_cmd_t		*cmd, *new_cmd;
3761 	mptsas_t		*mpt = ADDR2MPT(ap);
3762 	uint_t			oldcookiec;
3763 	mptsas_target_t		*ptgt = NULL;
3764 	int			rval;
3765 	mptsas_tgt_private_t	*tgt_private;
3766 	int			kf;
3767 
3768 	kf = (callback == SLEEP_FUNC)? KM_SLEEP: KM_NOSLEEP;
3769 
3770 	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
3771 	    tran_tgt_private;
3772 	ASSERT(tgt_private != NULL);
3773 	if (tgt_private == NULL) {
3774 		return (NULL);
3775 	}
3776 	ptgt = tgt_private->t_private;
3777 	ASSERT(ptgt != NULL);
3778 	if (ptgt == NULL)
3779 		return (NULL);
3780 	ap->a_target = ptgt->m_devhdl;
3781 	ap->a_lun = tgt_private->t_lun;
3782 
3783 	ASSERT(callback == NULL_FUNC || callback == SLEEP_FUNC);
3784 #ifdef MPTSAS_TEST_EXTRN_ALLOC
3785 	statuslen *= 100; tgtlen *= 4;
3786 #endif
3787 	NDBG3(("mptsas_scsi_init_pkt:\n"
3788 	    "\ttgt=%d in=0x%p bp=0x%p clen=%d slen=%d tlen=%d flags=%x",
3789 	    ap->a_target, (void *)pkt, (void *)bp,
3790 	    cmdlen, statuslen, tgtlen, flags));
3791 
3792 	/*
3793 	 * Allocate the new packet.
3794 	 */
3795 	if (pkt == NULL) {
3796 		ddi_dma_handle_t	save_dma_handle;
3797 
3798 		cmd = kmem_cache_alloc(mpt->m_kmem_cache, kf);
3799 		if (cmd == NULL)
3800 			return (NULL);
3801 
3802 		save_dma_handle = cmd->cmd_dmahandle;
3803 		bzero(cmd, sizeof (*cmd) + scsi_pkt_size());
3804 		cmd->cmd_dmahandle = save_dma_handle;
3805 
3806 		pkt = (void *)((uchar_t *)cmd +
3807 		    sizeof (struct mptsas_cmd));
3808 		pkt->pkt_ha_private = (opaque_t)cmd;
3809 		pkt->pkt_address = *ap;
3810 		pkt->pkt_private = (opaque_t)cmd->cmd_pkt_private;
3811 		pkt->pkt_scbp = (opaque_t)&cmd->cmd_scb;
3812 		pkt->pkt_cdbp = (opaque_t)&cmd->cmd_cdb;
3813 		cmd->cmd_pkt = (struct scsi_pkt *)pkt;
3814 		cmd->cmd_cdblen = (uchar_t)cmdlen;
3815 		cmd->cmd_scblen = statuslen;
3816 		cmd->cmd_rqslen = SENSE_LENGTH;
3817 		cmd->cmd_tgt_addr = ptgt;
3818 
3819 		if ((cmdlen > sizeof (cmd->cmd_cdb)) ||
3820 		    (tgtlen > PKT_PRIV_LEN) ||
3821 		    (statuslen > EXTCMDS_STATUS_SIZE)) {
3822 			int failure;
3823 
3824 			/*
3825 			 * We are going to allocate external packet space which
3826 			 * might include the sense data buffer for DMA so we
3827 			 * need to increase the reference counter here.  In
3828 			 * case the HBA is in reset we simply free the
3829 			 * allocated packet and bail out.
3830 			 */
3831 			mutex_enter(&mpt->m_mutex);
3832 			if (mpt->m_in_reset) {
3833 				mutex_exit(&mpt->m_mutex);
3834 
3835 				cmd->cmd_flags = CFLAG_FREE;
3836 				kmem_cache_free(mpt->m_kmem_cache, cmd);
3837 				return (NULL);
3838 			}
3839 			mpt->m_extreq_sense_refcount++;
3840 			ASSERT(mpt->m_extreq_sense_refcount > 0);
3841 			mutex_exit(&mpt->m_mutex);
3842 
3843 			/*
3844 			 * if extern alloc fails, all will be
3845 			 * deallocated, including cmd
3846 			 */
3847 			failure = mptsas_pkt_alloc_extern(mpt, cmd,
3848 			    cmdlen, tgtlen, statuslen, kf);
3849 
3850 			if (failure != 0 || cmd->cmd_extrqslen == 0) {
3851 				/*
3852 				 * If the external packet space allocation
3853 				 * failed, or we didn't allocate the sense
3854 				 * data buffer for DMA we need to decrease the
3855 				 * reference counter.
3856 				 */
3857 				mutex_enter(&mpt->m_mutex);
3858 				ASSERT(mpt->m_extreq_sense_refcount > 0);
3859 				mpt->m_extreq_sense_refcount--;
3860 				if (mpt->m_extreq_sense_refcount == 0)
3861 					cv_broadcast(
3862 					    &mpt->m_extreq_sense_refcount_cv);
3863 				mutex_exit(&mpt->m_mutex);
3864 
3865 				if (failure != 0) {
3866 					/*
3867 					 * if extern allocation fails, it will
3868 					 * deallocate the new pkt as well
3869 					 */
3870 					return (NULL);
3871 				}
3872 			}
3873 		}
3874 		new_cmd = cmd;
3875 
3876 	} else {
3877 		cmd = PKT2CMD(pkt);
3878 		new_cmd = NULL;
3879 	}
3880 
3881 
3882 	/* grab cmd->cmd_cookiec here as oldcookiec */
3883 
3884 	oldcookiec = cmd->cmd_cookiec;
3885 
3886 	/*
3887 	 * If the dma was broken up into PARTIAL transfers, cmd_nwin will be
3888 	 * greater than 0 and we'll need to grab the next dma window.
3889 	 */
3890 	/*
3891 	 * SLM-not doing extra command frame right now; may add later
3892 	 */
3893 
3894 	if (cmd->cmd_nwin > 0) {
3895 
3896 		/*
3897 		 * Make sure we haven't gone past the total number
3898 		 * of windows.
3899 		 */
3900 		if (++cmd->cmd_winindex >= cmd->cmd_nwin) {
3901 			return (NULL);
3902 		}
3903 		if (ddi_dma_getwin(cmd->cmd_dmahandle, cmd->cmd_winindex,
3904 		    &cmd->cmd_dma_offset, &cmd->cmd_dma_len,
3905 		    &cmd->cmd_cookie, &cmd->cmd_cookiec) == DDI_FAILURE) {
3906 			return (NULL);
3907 		}
3908 		goto get_dma_cookies;
3909 	}
3910 
3911 
3912 	if (flags & PKT_XARQ) {
3913 		cmd->cmd_flags |= CFLAG_XARQ;
3914 	}
3915 
3916 	/*
3917 	 * DMA resource allocation.  This version assumes your
3918 	 * HBA has some sort of bus-mastering or onboard DMA capability, with a
3919 	 * scatter-gather list of length MPTSAS_MAX_DMA_SEGS, as given in the
3920 	 * ddi_dma_attr_t structure and passed to scsi_impl_dmaget.
3921 	 */
3922 	if (bp && (bp->b_bcount != 0) &&
3923 	    (cmd->cmd_flags & CFLAG_DMAVALID) == 0) {
3924 
3925 		int	cnt, dma_flags;
3926 		mptti_t	*dmap;		/* ptr to the S/G list */
3927 
3928 		/*
3929 		 * Set up DMA memory and position to the next DMA segment.
3930 		 */
3931 		ASSERT(cmd->cmd_dmahandle != NULL);
3932 
3933 		if (bp->b_flags & B_READ) {
3934 			dma_flags = DDI_DMA_READ;
3935 			cmd->cmd_flags &= ~CFLAG_DMASEND;
3936 		} else {
3937 			dma_flags = DDI_DMA_WRITE;
3938 			cmd->cmd_flags |= CFLAG_DMASEND;
3939 		}
3940 		if (flags & PKT_CONSISTENT) {
3941 			cmd->cmd_flags |= CFLAG_CMDIOPB;
3942 			dma_flags |= DDI_DMA_CONSISTENT;
3943 		}
3944 
3945 		if (flags & PKT_DMA_PARTIAL) {
3946 			dma_flags |= DDI_DMA_PARTIAL;
3947 		}
3948 
3949 		/*
3950 		 * workaround for byte hole issue on psycho and
3951 		 * schizo pre 2.1
3952 		 */
3953 		if ((bp->b_flags & B_READ) && ((bp->b_flags &
3954 		    (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
3955 		    ((uintptr_t)bp->b_un.b_addr & 0x7)) {
3956 			dma_flags |= DDI_DMA_CONSISTENT;
3957 		}
3958 
3959 		rval = ddi_dma_buf_bind_handle(cmd->cmd_dmahandle, bp,
3960 		    dma_flags, callback, arg,
3961 		    &cmd->cmd_cookie, &cmd->cmd_cookiec);
3962 		if (rval == DDI_DMA_PARTIAL_MAP) {
3963 			(void) ddi_dma_numwin(cmd->cmd_dmahandle,
3964 			    &cmd->cmd_nwin);
3965 			cmd->cmd_winindex = 0;
3966 			(void) ddi_dma_getwin(cmd->cmd_dmahandle,
3967 			    cmd->cmd_winindex, &cmd->cmd_dma_offset,
3968 			    &cmd->cmd_dma_len, &cmd->cmd_cookie,
3969 			    &cmd->cmd_cookiec);
3970 		} else if (rval && (rval != DDI_DMA_MAPPED)) {
3971 			switch (rval) {
3972 			case DDI_DMA_NORESOURCES:
3973 				bioerror(bp, 0);
3974 				break;
3975 			case DDI_DMA_BADATTR:
3976 			case DDI_DMA_NOMAPPING:
3977 				bioerror(bp, EFAULT);
3978 				break;
3979 			case DDI_DMA_TOOBIG:
3980 			default:
3981 				bioerror(bp, EINVAL);
3982 				break;
3983 			}
3984 			cmd->cmd_flags &= ~CFLAG_DMAVALID;
3985 			if (new_cmd) {
3986 				mptsas_scsi_destroy_pkt(ap, pkt);
3987 			}
3988 			return ((struct scsi_pkt *)NULL);
3989 		}
3990 
3991 get_dma_cookies:
3992 		cmd->cmd_flags |= CFLAG_DMAVALID;
3993 		ASSERT(cmd->cmd_cookiec > 0);
3994 
3995 		if (cmd->cmd_cookiec > MPTSAS_MAX_CMD_SEGS) {
3996 			mptsas_log(mpt, CE_NOTE, "large cookiec received %d\n",
3997 			    cmd->cmd_cookiec);
3998 			bioerror(bp, EINVAL);
3999 			if (new_cmd) {
4000 				mptsas_scsi_destroy_pkt(ap, pkt);
4001 			}
4002 			return ((struct scsi_pkt *)NULL);
4003 		}
4004 
4005 		/*
4006 		 * Allocate extra SGL buffer if needed.
4007 		 */
4008 		if ((cmd->cmd_cookiec > MPTSAS_MAX_FRAME_SGES64(mpt)) &&
4009 		    (cmd->cmd_extra_frames == NULL)) {
4010 			if (mptsas_alloc_extra_sgl_frame(mpt, cmd) ==
4011 			    DDI_FAILURE) {
4012 				mptsas_log(mpt, CE_WARN, "MPT SGL mem alloc "
4013 				    "failed");
4014 				bioerror(bp, ENOMEM);
4015 				if (new_cmd) {
4016 					mptsas_scsi_destroy_pkt(ap, pkt);
4017 				}
4018 				return ((struct scsi_pkt *)NULL);
4019 			}
4020 		}
4021 
4022 		/*
4023 		 * Always use scatter-gather transfers.
4024 		 * Use the loop below to store the physical addresses of
4025 		 * DMA segments, from the DMA cookies, into your HBA's
4026 		 * scatter-gather list.
4027 		 * We need to ensure we have enough kmem alloc'd
4028 		 * for the sg entries since we are no longer using an
4029 		 * array inside mptsas_cmd_t.
4030 		 *
4031 		 * We check cmd->cmd_cookiec against oldcookiec so
4032 		 * the scatter-gather list is correctly allocated
4033 		 */
4034 
4035 		if (oldcookiec != cmd->cmd_cookiec) {
4036 			if (cmd->cmd_sg != (mptti_t *)NULL) {
4037 				kmem_free(cmd->cmd_sg, sizeof (mptti_t) *
4038 				    oldcookiec);
4039 				cmd->cmd_sg = NULL;
4040 			}
4041 		}
4042 
4043 		if (cmd->cmd_sg == (mptti_t *)NULL) {
4044 			cmd->cmd_sg = kmem_alloc((size_t)(sizeof (mptti_t)*
4045 			    cmd->cmd_cookiec), kf);
4046 
4047 			if (cmd->cmd_sg == (mptti_t *)NULL) {
4048 				mptsas_log(mpt, CE_WARN,
4049 				    "unable to kmem_alloc enough memory "
4050 				    "for scatter/gather list");
4051 		/*
4052 		 * if we have an ENOMEM condition we need to behave
4053 		 * the same way as the rest of this routine
4054 		 */
4055 
4056 				bioerror(bp, ENOMEM);
4057 				if (new_cmd) {
4058 					mptsas_scsi_destroy_pkt(ap, pkt);
4059 				}
4060 				return ((struct scsi_pkt *)NULL);
4061 			}
4062 		}
4063 
4064 		dmap = cmd->cmd_sg;
4065 
4066 		ASSERT(cmd->cmd_cookie.dmac_size != 0);
4067 
4068 		/*
4069 		 * store the first segment into the S/G list
4070 		 */
4071 		dmap->count = cmd->cmd_cookie.dmac_size;
4072 		dmap->addr.address64.Low = (uint32_t)
4073 		    (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
4074 		dmap->addr.address64.High = (uint32_t)
4075 		    (cmd->cmd_cookie.dmac_laddress >> 32);
4076 
4077 		/*
4078 		 * dmacount counts the size of the dma for this window
4079 		 * (if partial dma is being used).  totaldmacount
4080 		 * keeps track of the total amount of dma we have
4081 		 * transferred for all the windows (needed to calculate
4082 		 * the resid value below).
4083 		 */
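		/*
		 * Worked example (hypothetical values): a 1 MB transfer
		 * split by DDI_DMA_PARTIAL into two 512 KB windows.  While
		 * the first window is mapped, cmd_dmacount == 512 KB and
		 * cmd_totaldmacount == 512 KB, so pkt_resid below becomes
		 * 1 MB - 512 KB == 512 KB; once the second window is mapped
		 * on a later call, cmd_totaldmacount == 1 MB and pkt_resid
		 * drops to 0.
		 */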
4084 		cmd->cmd_dmacount = cmd->cmd_cookie.dmac_size;
4085 		cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;
4086 
4087 		/*
4088 		 * We already stored the first DMA scatter gather segment,
4089 		 * start at 1 if we need to store more.
4090 		 */
4091 		for (cnt = 1; cnt < cmd->cmd_cookiec; cnt++) {
4092 			/*
4093 			 * Get next DMA cookie
4094 			 */
4095 			ddi_dma_nextcookie(cmd->cmd_dmahandle,
4096 			    &cmd->cmd_cookie);
4097 			dmap++;
4098 
4099 			cmd->cmd_dmacount += cmd->cmd_cookie.dmac_size;
4100 			cmd->cmd_totaldmacount += cmd->cmd_cookie.dmac_size;
4101 
4102 			/*
4103 			 * store the segment parms into the S/G list
4104 			 */
4105 			dmap->count = cmd->cmd_cookie.dmac_size;
4106 			dmap->addr.address64.Low = (uint32_t)
4107 			    (cmd->cmd_cookie.dmac_laddress & 0xffffffffull);
4108 			dmap->addr.address64.High = (uint32_t)
4109 			    (cmd->cmd_cookie.dmac_laddress >> 32);
4110 		}
4111 
4112 		/*
4113 		 * If this was partially allocated we set the resid to
4114 		 * the amount of data NOT transferred in this window.
4115 		 * If there is only one window, the resid will be 0.
4116 		 */
4117 		pkt->pkt_resid = (bp->b_bcount - cmd->cmd_totaldmacount);
4118 		NDBG3(("mptsas_scsi_init_pkt: cmd_dmacount=%d.",
4119 		    cmd->cmd_dmacount));
4120 	}
4121 	return (pkt);
4122 }
4123 
4124 /*
4125  * tran_destroy_pkt(9E) - scsi_pkt(9S) deallocation
4126  *
4127  * Notes:
4128  *	- also frees DMA resources if allocated
4129  *	- implicit DMA synchronization
4130  */
4131 static void
4132 mptsas_scsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4133 {
4134 	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
4135 	mptsas_t	*mpt = ADDR2MPT(ap);
4136 
4137 	NDBG3(("mptsas_scsi_destroy_pkt: target=%d pkt=0x%p",
4138 	    ap->a_target, (void *)pkt));
4139 
4140 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
4141 		(void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
4142 		cmd->cmd_flags &= ~CFLAG_DMAVALID;
4143 	}
4144 
4145 	if (cmd->cmd_sg) {
4146 		kmem_free(cmd->cmd_sg, sizeof (mptti_t) * cmd->cmd_cookiec);
4147 		cmd->cmd_sg = NULL;
4148 	}
4149 
4150 	mptsas_free_extra_sgl_frame(mpt, cmd);
4151 
4152 	if ((cmd->cmd_flags &
4153 	    (CFLAG_FREE | CFLAG_CDBEXTERN | CFLAG_PRIVEXTERN |
4154 	    CFLAG_SCBEXTERN)) == 0) {
4155 		cmd->cmd_flags = CFLAG_FREE;
4156 		kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
4157 	} else {
4158 		boolean_t extrqslen = cmd->cmd_extrqslen != 0;
4159 
4160 		mptsas_pkt_destroy_extern(mpt, cmd);
4161 
4162 		/*
4163 		 * If the packet had the sense data buffer for DMA allocated we
4164 		 * need to decrease the reference counter.
4165 		 */
4166 		if (extrqslen) {
4167 			mutex_enter(&mpt->m_mutex);
4168 			ASSERT(mpt->m_extreq_sense_refcount > 0);
4169 			mpt->m_extreq_sense_refcount--;
4170 			if (mpt->m_extreq_sense_refcount == 0)
4171 				cv_broadcast(&mpt->m_extreq_sense_refcount_cv);
4172 			mutex_exit(&mpt->m_mutex);
4173 		}
4174 	}
4175 }
4176 
4177 /*
4178  * kmem cache constructor and destructor:
4179  * When constructing, we allocate the dma handle.
4180  * When destructing, we just free the dma handle.
4181  */
4182 static int
4183 mptsas_kmem_cache_constructor(void *buf, void *cdrarg, int kmflags)
4184 {
4185 	mptsas_cmd_t		*cmd = buf;
4186 	mptsas_t		*mpt  = cdrarg;
4187 	int			(*callback)(caddr_t);
4188 
4189 	callback = (kmflags == KM_SLEEP)? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
4190 
4191 	NDBG4(("mptsas_kmem_cache_constructor"));
4192 
4193 	/*
4194 	 * allocate a dma handle
4195 	 */
4196 	if ((ddi_dma_alloc_handle(mpt->m_dip, &mpt->m_io_dma_attr, callback,
4197 	    NULL, &cmd->cmd_dmahandle)) != DDI_SUCCESS) {
4198 		cmd->cmd_dmahandle = NULL;
4199 		return (-1);
4200 	}
4201 	return (0);
4202 }
4203 
4204 static void
4205 mptsas_kmem_cache_destructor(void *buf, void *cdrarg)
4206 {
4207 #ifndef __lock_lint
4208 	_NOTE(ARGUNUSED(cdrarg))
4209 #endif
4210 	mptsas_cmd_t	*cmd = buf;
4211 
4212 	NDBG4(("mptsas_kmem_cache_destructor"));
4213 
4214 	if (cmd->cmd_dmahandle) {
4215 		ddi_dma_free_handle(&cmd->cmd_dmahandle);
4216 		cmd->cmd_dmahandle = NULL;
4217 	}
4218 }
4219 
4220 static int
4221 mptsas_cache_frames_constructor(void *buf, void *cdrarg, int kmflags)
4222 {
4223 	mptsas_cache_frames_t	*p = buf;
4224 	mptsas_t		*mpt = cdrarg;
4225 	ddi_dma_attr_t		frame_dma_attr;
4226 	size_t			mem_size, alloc_len;
4227 	ddi_dma_cookie_t	cookie;
4228 	uint_t			ncookie;
4229 	int (*callback)(caddr_t) = (kmflags == KM_SLEEP)
4230 	    ? DDI_DMA_SLEEP: DDI_DMA_DONTWAIT;
4231 
4232 	frame_dma_attr = mpt->m_msg_dma_attr;
4233 	frame_dma_attr.dma_attr_align = 0x10;
4234 	frame_dma_attr.dma_attr_sgllen = 1;
4235 
4236 	if (ddi_dma_alloc_handle(mpt->m_dip, &frame_dma_attr, callback, NULL,
4237 	    &p->m_dma_hdl) != DDI_SUCCESS) {
4238 		mptsas_log(mpt, CE_WARN, "Unable to allocate dma handle for"
4239 		    " extra SGL.");
4240 		return (DDI_FAILURE);
4241 	}
4242 
4243 	mem_size = (mpt->m_max_request_frames - 1) * mpt->m_req_frame_size;
4244 
4245 	if (ddi_dma_mem_alloc(p->m_dma_hdl, mem_size, &mpt->m_dev_acc_attr,
4246 	    DDI_DMA_CONSISTENT, callback, NULL, (caddr_t *)&p->m_frames_addr,
4247 	    &alloc_len, &p->m_acc_hdl) != DDI_SUCCESS) {
4248 		ddi_dma_free_handle(&p->m_dma_hdl);
4249 		p->m_dma_hdl = NULL;
4250 		mptsas_log(mpt, CE_WARN, "Unable to allocate dma memory for"
4251 		    " extra SGL.");
4252 		return (DDI_FAILURE);
4253 	}
4254 
4255 	if (ddi_dma_addr_bind_handle(p->m_dma_hdl, NULL, p->m_frames_addr,
4256 	    alloc_len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, callback, NULL,
4257 	    &cookie, &ncookie) != DDI_DMA_MAPPED) {
4258 		(void) ddi_dma_mem_free(&p->m_acc_hdl);
4259 		ddi_dma_free_handle(&p->m_dma_hdl);
4260 		p->m_dma_hdl = NULL;
4261 		mptsas_log(mpt, CE_WARN, "Unable to bind DMA resources for"
4262 		    " extra SGL");
4263 		return (DDI_FAILURE);
4264 	}
4265 
4266 	/*
4267 	 * Store the physical address of the SGL memory.  The chip uses
4268 	 * this address to DMA to and from the driver.  The virtual address
4269 	 * (m_frames_addr) is what the driver uses to fill in the SGL.
4270 	 */
4271 	p->m_phys_addr = cookie.dmac_laddress;
4272 
4273 	return (DDI_SUCCESS);
4274 }
4275 
4276 static void
4277 mptsas_cache_frames_destructor(void *buf, void *cdrarg)
4278 {
4279 #ifndef __lock_lint
4280 	_NOTE(ARGUNUSED(cdrarg))
4281 #endif
4282 	mptsas_cache_frames_t	*p = buf;
4283 	if (p->m_dma_hdl != NULL) {
4284 		(void) ddi_dma_unbind_handle(p->m_dma_hdl);
4285 		(void) ddi_dma_mem_free(&p->m_acc_hdl);
4286 		ddi_dma_free_handle(&p->m_dma_hdl);
4287 		p->m_phys_addr = 0;
4288 		p->m_frames_addr = NULL;
4289 		p->m_dma_hdl = NULL;
4290 		p->m_acc_hdl = NULL;
4291 	}
4292 
4293 }
4294 
4295 /*
4296  * Figure out if we need to use a different method for the request
4297  * sense buffer and allocate from the map if necessary.
4298  */
4299 static boolean_t
4300 mptsas_cmdarqsize(mptsas_t *mpt, mptsas_cmd_t *cmd, size_t senselength, int kf)
4301 {
4302 	if (senselength > mpt->m_req_sense_size) {
4303 		unsigned long i;
4304 
4305 		/* Sense length is limited to an 8 bit value in MPI Spec. */
4306 		if (senselength > 255)
4307 			senselength = 255;
4308 		cmd->cmd_extrqschunks = (senselength +
4309 		    (mpt->m_req_sense_size - 1))/mpt->m_req_sense_size;
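		/*
		 * Worked example (hypothetical sizes): with
		 * m_req_sense_size == 64 and senselength == 252,
		 * cmd_extrqschunks == (252 + 63) / 64 == 4, so four chunks
		 * are taken from m_erqsense_map and the sense buffer starts
		 * at m_extreq_sense + (i - 1) * 64.
		 */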
4310 		i = (kf == KM_SLEEP ? rmalloc_wait : rmalloc)
4311 		    (mpt->m_erqsense_map, cmd->cmd_extrqschunks);
4312 
4313 		if (i == 0)
4314 			return (B_FALSE);
4315 
4316 		cmd->cmd_extrqslen = (uint16_t)senselength;
4317 		cmd->cmd_extrqsidx = i - 1;
4318 		cmd->cmd_arq_buf = mpt->m_extreq_sense +
4319 		    (cmd->cmd_extrqsidx * mpt->m_req_sense_size);
4320 	} else {
4321 		cmd->cmd_rqslen = (uchar_t)senselength;
4322 	}
4323 
4324 	return (B_TRUE);
4325 }
4326 
4327 /*
4328  * Allocate and deallocate external pkt space (i.e. not part of mptsas_cmd)
4329  * for non-standard length cdb, pkt_private, and status areas.
4330  * If allocation fails, then deallocate all external space and the pkt.
4331  */
4332 /* ARGSUSED */
4333 static int
4334 mptsas_pkt_alloc_extern(mptsas_t *mpt, mptsas_cmd_t *cmd,
4335     int cmdlen, int tgtlen, int statuslen, int kf)
4336 {
4337 	caddr_t			cdbp, scbp, tgt;
4338 
4339 	NDBG3(("mptsas_pkt_alloc_extern: "
4340 	    "cmd=0x%p cmdlen=%d tgtlen=%d statuslen=%d kf=%x",
4341 	    (void *)cmd, cmdlen, tgtlen, statuslen, kf));
4342 
4343 	tgt = cdbp = scbp = NULL;
4344 	cmd->cmd_scblen		= statuslen;
4345 	cmd->cmd_privlen	= (uchar_t)tgtlen;
4346 
4347 	if (cmdlen > sizeof (cmd->cmd_cdb)) {
4348 		if ((cdbp = kmem_zalloc((size_t)cmdlen, kf)) == NULL) {
4349 			goto fail;
4350 		}
4351 		cmd->cmd_pkt->pkt_cdbp = (opaque_t)cdbp;
4352 		cmd->cmd_flags |= CFLAG_CDBEXTERN;
4353 	}
4354 	if (tgtlen > PKT_PRIV_LEN) {
4355 		if ((tgt = kmem_zalloc((size_t)tgtlen, kf)) == NULL) {
4356 			goto fail;
4357 		}
4358 		cmd->cmd_flags |= CFLAG_PRIVEXTERN;
4359 		cmd->cmd_pkt->pkt_private = tgt;
4360 	}
4361 	if (statuslen > EXTCMDS_STATUS_SIZE) {
4362 		if ((scbp = kmem_zalloc((size_t)statuslen, kf)) == NULL) {
4363 			goto fail;
4364 		}
4365 		cmd->cmd_flags |= CFLAG_SCBEXTERN;
4366 		cmd->cmd_pkt->pkt_scbp = (opaque_t)scbp;
4367 
4368 		/* allocate sense data buf for DMA */
4369 		if (mptsas_cmdarqsize(mpt, cmd, statuslen -
4370 		    MPTSAS_GET_ITEM_OFF(struct scsi_arq_status, sts_sensedata),
4371 		    kf) == B_FALSE)
4372 			goto fail;
4373 	}
4374 	return (0);
4375 fail:
4376 	mptsas_pkt_destroy_extern(mpt, cmd);
4377 	return (1);
4378 }
4379 
4380 /*
4381  * deallocate external pkt space and deallocate the pkt
4382  */
4383 static void
4384 mptsas_pkt_destroy_extern(mptsas_t *mpt, mptsas_cmd_t *cmd)
4385 {
4386 	NDBG3(("mptsas_pkt_destroy_extern: cmd=0x%p", (void *)cmd));
4387 
4388 	if (cmd->cmd_flags & CFLAG_FREE) {
4389 		mptsas_log(mpt, CE_PANIC,
4390 		    "mptsas_pkt_destroy_extern: freeing free packet");
4391 		_NOTE(NOT_REACHED)
4392 		/* NOTREACHED */
4393 	}
4394 	if (cmd->cmd_extrqslen != 0) {
4395 		rmfree(mpt->m_erqsense_map, cmd->cmd_extrqschunks,
4396 		    cmd->cmd_extrqsidx + 1);
4397 	}
4398 	if (cmd->cmd_flags & CFLAG_CDBEXTERN) {
4399 		kmem_free(cmd->cmd_pkt->pkt_cdbp, (size_t)cmd->cmd_cdblen);
4400 	}
4401 	if (cmd->cmd_flags & CFLAG_SCBEXTERN) {
4402 		kmem_free(cmd->cmd_pkt->pkt_scbp, (size_t)cmd->cmd_scblen);
4403 	}
4404 	if (cmd->cmd_flags & CFLAG_PRIVEXTERN) {
4405 		kmem_free(cmd->cmd_pkt->pkt_private, (size_t)cmd->cmd_privlen);
4406 	}
4407 	cmd->cmd_flags = CFLAG_FREE;
4408 	kmem_cache_free(mpt->m_kmem_cache, (void *)cmd);
4409 }
4410 
4411 /*
4412  * tran_sync_pkt(9E) - explicit DMA synchronization
4413  */
4414 /*ARGSUSED*/
4415 static void
4416 mptsas_scsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
4417 {
4418 	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
4419 
4420 	NDBG3(("mptsas_scsi_sync_pkt: target=%d, pkt=0x%p",
4421 	    ap->a_target, (void *)pkt));
4422 
4423 	if (cmd->cmd_dmahandle) {
4424 		(void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4425 		    (cmd->cmd_flags & CFLAG_DMASEND) ?
4426 		    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
4427 	}
4428 }
4429 
4430 /*
4431  * tran_dmafree(9E) - deallocate DMA resources allocated for command
4432  */
4433 /*ARGSUSED*/
4434 static void
4435 mptsas_scsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
4436 {
4437 	mptsas_cmd_t	*cmd = PKT2CMD(pkt);
4438 	mptsas_t	*mpt = ADDR2MPT(ap);
4439 
4440 	NDBG3(("mptsas_scsi_dmafree: target=%d pkt=0x%p",
4441 	    ap->a_target, (void *)pkt));
4442 
4443 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
4444 		(void) ddi_dma_unbind_handle(cmd->cmd_dmahandle);
4445 		cmd->cmd_flags &= ~CFLAG_DMAVALID;
4446 	}
4447 
4448 	mptsas_free_extra_sgl_frame(mpt, cmd);
4449 }
4450 
4451 static void
4452 mptsas_pkt_comp(struct scsi_pkt *pkt, mptsas_cmd_t *cmd)
4453 {
4454 	if ((cmd->cmd_flags & CFLAG_CMDIOPB) &&
4455 	    (!(cmd->cmd_flags & CFLAG_DMASEND))) {
4456 		(void) ddi_dma_sync(cmd->cmd_dmahandle, 0, 0,
4457 		    DDI_DMA_SYNC_FORCPU);
4458 	}
4459 	(*pkt->pkt_comp)(pkt);
4460 }
4461 
4462 static void
4463 mptsas_sge_mainframe(mptsas_cmd_t *cmd, pMpi2SCSIIORequest_t frame,
4464     ddi_acc_handle_t acc_hdl, uint_t cookiec, uint32_t end_flags)
4465 {
4466 	pMpi2SGESimple64_t	sge;
4467 	mptti_t			*dmap;
4468 	uint32_t		flags;
4469 
4470 	dmap = cmd->cmd_sg;
4471 
4472 	sge = (pMpi2SGESimple64_t)(&frame->SGL);
4473 	while (cookiec--) {
4474 		ddi_put32(acc_hdl,
4475 		    &sge->Address.Low, dmap->addr.address64.Low);
4476 		ddi_put32(acc_hdl,
4477 		    &sge->Address.High, dmap->addr.address64.High);
4478 		ddi_put32(acc_hdl, &sge->FlagsLength,
4479 		    dmap->count);
4480 		flags = ddi_get32(acc_hdl, &sge->FlagsLength);
4481 		flags |= ((uint32_t)
4482 		    (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4483 		    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4484 		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4485 		    MPI2_SGE_FLAGS_SHIFT);
4486 
4487 		/*
4488 		 * If this is the last cookie, we set the flags
4489 		 * to indicate so
4490 		 */
4491 		if (cookiec == 0) {
4492 			flags |= end_flags;
4493 		}
4494 		if (cmd->cmd_flags & CFLAG_DMASEND) {
4495 			flags |= (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4496 			    MPI2_SGE_FLAGS_SHIFT);
4497 		} else {
4498 			flags |= (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4499 			    MPI2_SGE_FLAGS_SHIFT);
4500 		}
4501 		ddi_put32(acc_hdl, &sge->FlagsLength, flags);
4502 		dmap++;
4503 		sge++;
4504 	}
4505 }
4506 
4507 static void
4508 mptsas_sge_chain(mptsas_t *mpt, mptsas_cmd_t *cmd,
4509     pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
4510 {
4511 	pMpi2SGESimple64_t	sge;
4512 	pMpi2SGEChain64_t	sgechain;
4513 	uint64_t		nframe_phys_addr;
4514 	uint_t			cookiec;
4515 	mptti_t			*dmap;
4516 	uint32_t		flags;
4517 
4518 	/*
4519 	 * Save the number of entries in the DMA
4520 	 * Scatter/Gather list
4521 	 */
4522 	cookiec = cmd->cmd_cookiec;
4523 
4524 	/*
4525 	 * Hereby we start to deal with multiple frames.
4526 	 * The process is as follows:
4527 	 * 1. Determine how many frames are needed for SGL element
4528 	 *    storage; Note that all frames are stored in contiguous
4529 	 *    memory space and in 64-bit DMA mode each element is
4530 	 *    3 double-words (12 bytes) long.
4531 	 * 2. Fill up the main frame. We need to do this separately
4532 	 *    since it contains the SCSI IO request header and needs
4533 	 *    dedicated processing. Note that the last 4 double-words
4534 	 *    of the SCSI IO header is for SGL element storage
4535 	 *    of the SCSI IO header are for SGL element storage
4536 	 * 3. Fill the chain element in the main frame, so the DMA
4537 	 *    engine can use the following frames.
4538 	 * 4. Enter a loop to fill the remaining frames. Note that the
4539 	 *    last frame contains no chain element.  The remaining
4540 	 *    frames go into the mpt SGL buffer allocated on the fly,
4541 	 *    not immediately following the main message frame, as in
4542 	 *    Gen1.
4543 	 * Some restrictions:
4544 	 * 1. For 64-bit DMA, the simple element and chain element
4545 	 *    are both of 3 double-words (12 bytes) in size, even
4546 	 *    though all frames are stored in the first 4G of mem
4547 	 *    range and the higher 32-bits of the address are always 0.
4548 	 * 2. On some controllers (like the 1064/1068), a frame can
4549 	 *    hold SGL elements with the last 1 or 2 double-words
4550 	 *    (4 or 8 bytes) un-used. On these controllers, we should
4551 	 *    recognize that there's not enough room for another SGL
4552 	 *    element and move the sge pointer to the next frame.
4553 	 */
4554 	int			i, j, k, l, frames, sgemax;
4555 	int			temp;
4556 	uint8_t			chainflags;
4557 	uint16_t		chainlength;
4558 	mptsas_cache_frames_t	*p;
4559 
4560 	/*
4561 	 * Sgemax is the number of SGE's that will fit
4562 	 * in each extra frame and frames is the total
4563 	 * number of frames we'll need.  One sge entry per
4564 	 * frame is reserved for the chain element, thus the -1 below.
4565 	 */
4566 	sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_SGE_SIMPLE64))
4567 	    - 1);
4568 	temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;
4569 
4570 	/*
4571 	 * A little check to see if we need to round up the number
4572 	 * of frames we need
4573 	 */
4574 	if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
4575 	    sgemax) > 1) {
4576 		frames = (temp + 1);
4577 	} else {
4578 		frames = temp;
4579 	}
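	/*
	 * Worked example (hypothetical values, for illustration only):
	 * with 12-byte MPI2 simple elements, m_req_frame_size == 128 and
	 * MPTSAS_MAX_FRAME_SGES64(mpt) == 9, sgemax == 128 / 12 - 1 == 9.
	 * For cookiec == 40 the main frame holds 8 SGEs plus the chain
	 * element, leaving 32 cookies for extra frames: temp == 32 / 9 == 3
	 * and, since 32 - 3 * 9 == 5 > 1, frames rounds up to 4.
	 */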
4580 	dmap = cmd->cmd_sg;
4581 	sge = (pMpi2SGESimple64_t)(&frame->SGL);
4582 
4583 	/*
4584 	 * First fill in the main frame
4585 	 */
4586 	j = MPTSAS_MAX_FRAME_SGES64(mpt) - 1;
4587 	mptsas_sge_mainframe(cmd, frame, acc_hdl, j,
4588 	    ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT) <<
4589 	    MPI2_SGE_FLAGS_SHIFT));
4590 	dmap += j;
4591 	sge += j;
4592 	j++;
4593 
4594 	/*
4595 	 * Fill in the chain element in the main frame.
4596 	 * About calculation on ChainOffset:
4597 	 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
4598 	 *    in the end reserved for SGL element storage
4599 	 *    (MPI2_SGE_IO_UNION); we should count it in our
4600 	 *    calculation.  See its definition in the header file.
4601 	 * 2. Constant j is the counter of the current SGL element
4602 	 *    that will be processed, and (j - 1) is the number of
4603 	 *    SGL elements that have been processed (stored in the
4604 	 *    main frame).
4605 	 * 3. ChainOffset value should be in units of double-words (4
4606 	 *    bytes) so the last value should be divided by 4.
4607 	 */
4608 	ddi_put8(acc_hdl, &frame->ChainOffset,
4609 	    (sizeof (MPI2_SCSI_IO_REQUEST) -
4610 	    sizeof (MPI2_SGE_IO_UNION) +
4611 	    (j - 1) * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
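	/*
	 * Worked example (hypothetical sizes, for illustration only): if
	 * sizeof (MPI2_SCSI_IO_REQUEST) were 192 bytes, then with the
	 * 16-byte MPI2_SGE_IO_UNION and j - 1 == 8 stored 12-byte simple
	 * elements, ChainOffset == (192 - 16 + 8 * 12) / 4 == 68
	 * double-words, i.e. the chain element starts 272 bytes into the
	 * main frame.
	 */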
4612 	sgechain = (pMpi2SGEChain64_t)sge;
4613 	chainflags = (MPI2_SGE_FLAGS_CHAIN_ELEMENT |
4614 	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4615 	    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
4616 	ddi_put8(acc_hdl, &sgechain->Flags, chainflags);
4617 
4618 	/*
4619 	 * The size of the next frame is the exact amount of space
4620 	 * (in bytes) used to store the SGL elements. j is the counter
4621 	 * of SGL elements. (j - 1) is the number of SGL elements that
4622 	 * have been processed (stored in frames).
4623 	 */
4624 	if (frames >= 2) {
4625 		ASSERT(mpt->m_req_frame_size >= sizeof (MPI2_SGE_SIMPLE64));
4626 		chainlength = mpt->m_req_frame_size /
4627 		    sizeof (MPI2_SGE_SIMPLE64) *
4628 		    sizeof (MPI2_SGE_SIMPLE64);
4629 	} else {
4630 		chainlength = ((cookiec - (j - 1)) *
4631 		    sizeof (MPI2_SGE_SIMPLE64));
4632 	}
4633 
4634 	p = cmd->cmd_extra_frames;
4635 
4636 	ddi_put16(acc_hdl, &sgechain->Length, chainlength);
4637 	ddi_put32(acc_hdl, &sgechain->Address.Low, p->m_phys_addr);
4638 	ddi_put32(acc_hdl, &sgechain->Address.High, p->m_phys_addr >> 32);
4639 
4640 	/*
4641 	 * If there are more than 2 frames left we have to
4642 	 * fill in the next chain offset to the location of
4643 	 * the chain element in the next frame.
4644 	 * sgemax is the number of simple elements in an extra
4645 	 * frame. Note that the value NextChainOffset should be
4646 	 * in double-words (4 bytes).
4647 	 */
4648 	if (frames >= 2) {
4649 		ddi_put8(acc_hdl, &sgechain->NextChainOffset,
4650 		    (sgemax * sizeof (MPI2_SGE_SIMPLE64)) >> 2);
4651 	} else {
4652 		ddi_put8(acc_hdl, &sgechain->NextChainOffset, 0);
4653 	}
4654 
4655 	/*
4656 	 * Jump to next frame;
4657 	 * Starting here, chain buffers go into the per command SGL.
4658 	 * This buffer is allocated when chain buffers are needed.
4659 	 */
4660 	sge = (pMpi2SGESimple64_t)p->m_frames_addr;
4661 	i = cookiec;
4662 
4663 	/*
4664 	 * Start filling in frames with SGE's.  If we
4665 	 * reach the end of frame and still have SGE's
4666 	 * to fill we need to add a chain element and
4667 	 * use another frame.  j will be our counter
4668 	 * for what cookie we are at and i will be
4669 	 * the total cookiec. k is the current frame
4670 	 */
4671 	for (k = 1; k <= frames; k++) {
4672 		for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {
4673 
4674 			/*
4675 			 * If we have reached the end of frame
4676 			 * and we have more SGE's to fill in
4677 			 * we have to fill the final entry
4678 			 * with a chain element and then
4679 			 * continue to the next frame
4680 			 */
4681 			if ((l == (sgemax + 1)) && (k != frames)) {
4682 				sgechain = (pMpi2SGEChain64_t)sge;
4683 				j--;
4684 				chainflags = (
4685 				    MPI2_SGE_FLAGS_CHAIN_ELEMENT |
4686 				    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4687 				    MPI2_SGE_FLAGS_64_BIT_ADDRESSING);
4688 				ddi_put8(p->m_acc_hdl,
4689 				    &sgechain->Flags, chainflags);
4690 				/*
4691 				 * k is the frame counter and (k + 1)
4692 				 * is the number of the next frame.
4693 				 * Note that frames are in contiguous
4694 				 * memory space.
4695 				 */
4696 				nframe_phys_addr = p->m_phys_addr +
4697 				    (mpt->m_req_frame_size * k);
4698 				ddi_put32(p->m_acc_hdl,
4699 				    &sgechain->Address.Low,
4700 				    nframe_phys_addr);
4701 				ddi_put32(p->m_acc_hdl,
4702 				    &sgechain->Address.High,
4703 				    nframe_phys_addr >> 32);
4704 
4705 				/*
4706 				 * If there are more than 2 frames left
4707 				 * we have to set the next chain offset to
4708 				 * the location of the chain element
4709 				 * in the next frame and fill in the
4710 				 * length of the next chain
4711 				 */
4712 				if ((frames - k) >= 2) {
4713 					ddi_put8(p->m_acc_hdl,
4714 					    &sgechain->NextChainOffset,
4715 					    (sgemax *
4716 					    sizeof (MPI2_SGE_SIMPLE64))
4717 					    >> 2);
4718 					ddi_put16(p->m_acc_hdl,
4719 					    &sgechain->Length,
4720 					    mpt->m_req_frame_size /
4721 					    sizeof (MPI2_SGE_SIMPLE64) *
4722 					    sizeof (MPI2_SGE_SIMPLE64));
4723 				} else {
4724 					/*
4725 					 * This is the last frame. Set
4726 					 * the NextChainOffset to 0 and
4727 					 * Length is the total size of
4728 					 * all remaining simple elements
4729 					 */
4730 					ddi_put8(p->m_acc_hdl,
4731 					    &sgechain->NextChainOffset,
4732 					    0);
4733 					ddi_put16(p->m_acc_hdl,
4734 					    &sgechain->Length,
4735 					    (cookiec - j) *
4736 					    sizeof (MPI2_SGE_SIMPLE64));
4737 				}
4738 
4739 				/* Jump to the next frame */
4740 				sge = (pMpi2SGESimple64_t)
4741 				    ((char *)p->m_frames_addr +
4742 				    (int)mpt->m_req_frame_size * k);
4743 
4744 				continue;
4745 			}
4746 
4747 			ddi_put32(p->m_acc_hdl,
4748 			    &sge->Address.Low,
4749 			    dmap->addr.address64.Low);
4750 			ddi_put32(p->m_acc_hdl,
4751 			    &sge->Address.High,
4752 			    dmap->addr.address64.High);
4753 			ddi_put32(p->m_acc_hdl,
4754 			    &sge->FlagsLength, dmap->count);
4755 			flags = ddi_get32(p->m_acc_hdl,
4756 			    &sge->FlagsLength);
4757 			flags |= ((uint32_t)(
4758 			    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
4759 			    MPI2_SGE_FLAGS_SYSTEM_ADDRESS |
4760 			    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
4761 			    MPI2_SGE_FLAGS_SHIFT);
4762 
4763 			/*
4764 			 * If we are at the end of the frame and
4765 			 * there is another frame to fill in
4766 			 * we set the last simple element as last
4767 			 * element
4768 			 */
4769 			if ((l == sgemax) && (k != frames)) {
4770 				flags |= ((uint32_t)
4771 				    (MPI2_SGE_FLAGS_LAST_ELEMENT) <<
4772 				    MPI2_SGE_FLAGS_SHIFT);
4773 			}
4774 
4775 			/*
4776 			 * If this is the final cookie we
4777 			 * indicate it by setting the flags
4778 			 */
4779 			if (j == i) {
4780 				flags |= ((uint32_t)
4781 				    (MPI2_SGE_FLAGS_LAST_ELEMENT |
4782 				    MPI2_SGE_FLAGS_END_OF_BUFFER |
4783 				    MPI2_SGE_FLAGS_END_OF_LIST) <<
4784 				    MPI2_SGE_FLAGS_SHIFT);
4785 			}
4786 			if (cmd->cmd_flags & CFLAG_DMASEND) {
4787 				flags |=
4788 				    (MPI2_SGE_FLAGS_HOST_TO_IOC <<
4789 				    MPI2_SGE_FLAGS_SHIFT);
4790 			} else {
4791 				flags |=
4792 				    (MPI2_SGE_FLAGS_IOC_TO_HOST <<
4793 				    MPI2_SGE_FLAGS_SHIFT);
4794 			}
4795 			ddi_put32(p->m_acc_hdl,
4796 			    &sge->FlagsLength, flags);
4797 			dmap++;
4798 			sge++;
4799 		}
4800 	}
4801 
4802 	/*
4803 	 * Sync DMA with the chain buffers that were just created
4804 	 */
4805 	(void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
4806 }
4807 
4808 static void
4809 mptsas_ieee_sge_mainframe(mptsas_cmd_t *cmd, pMpi2SCSIIORequest_t frame,
4810     ddi_acc_handle_t acc_hdl, uint_t cookiec, uint8_t end_flag)
4811 {
4812 	pMpi2IeeeSgeSimple64_t	ieeesge;
4813 	mptti_t			*dmap;
4814 	uint8_t			flags;
4815 
4816 	dmap = cmd->cmd_sg;
4817 
4818 	NDBG1(("mptsas_ieee_sge_mainframe: cookiec=%d, %s", cookiec,
4819 	    cmd->cmd_flags & CFLAG_DMASEND?"Out":"In"));
4820 
4821 	ieeesge = (pMpi2IeeeSgeSimple64_t)(&frame->SGL);
4822 	while (cookiec--) {
4823 		ddi_put32(acc_hdl,
4824 		    &ieeesge->Address.Low, dmap->addr.address64.Low);
4825 		ddi_put32(acc_hdl,
4826 		    &ieeesge->Address.High, dmap->addr.address64.High);
4827 		ddi_put32(acc_hdl, &ieeesge->Length,
4828 		    dmap->count);
4829 		NDBG1(("mptsas_ieee_sge_mainframe: len=%d", dmap->count));
4830 		flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
4831 		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
4832 
4833 		/*
4834 		 * If this is the last cookie, we set the flags
4835 		 * to indicate so
4836 		 */
4837 		if (cookiec == 0) {
4838 			flags |= end_flag;
4839 		}
4840 
4841 		ddi_put8(acc_hdl, &ieeesge->Flags, flags);
4842 		dmap++;
4843 		ieeesge++;
4844 	}
4845 }
4846 
4847 static void
4848 mptsas_ieee_sge_chain(mptsas_t *mpt, mptsas_cmd_t *cmd,
4849     pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
4850 {
4851 	pMpi2IeeeSgeSimple64_t	ieeesge;
4852 	pMpi25IeeeSgeChain64_t	ieeesgechain;
4853 	uint64_t		nframe_phys_addr;
4854 	uint_t			cookiec;
4855 	mptti_t			*dmap;
4856 	uint8_t			flags;
4857 
4858 	/*
4859 	 * Save the number of entries in the DMA
4860 	 * Scatter/Gather list
4861 	 */
4862 	cookiec = cmd->cmd_cookiec;
4863 
4864 	NDBG1(("mptsas_ieee_sge_chain: cookiec=%d", cookiec));
4865 
4866 	/*
4867 	 * Hereby we start to deal with multiple frames.
4868 	 * The process is as follows:
4869 	 * 1. Determine how many frames are needed for SGL element
4870 	 *    storage; Note that all frames are stored in contiguous
4871 	 *    memory space and in 64-bit DMA mode each element is
4872 	 *    4 double-words (16 bytes) long.
4873 	 * 2. Fill up the main frame. We need to do this separately
4874 	 *    since it contains the SCSI IO request header and needs
4875 	 *    dedicated processing. Note that the last 4 double-words
4876 	 *    of the SCSI IO header is for SGL element storage
4877 	 *    of the SCSI IO header are for SGL element storage
4878 	 * 3. Fill the chain element in the main frame, so the DMA
4879 	 *    engine can use the following frames.
4880 	 * 4. Enter a loop to fill the remaining frames. Note that the
4881 	 *    last frame contains no chain element.  The remaining
4882 	 *    frames go into the mpt SGL buffer allocated on the fly,
4883 	 *    not immediately following the main message frame, as in
4884 	 *    Gen1.
4885 	 * Restrictions:
4886 	 *    For 64-bit DMA, the simple element and chain element
4887 	 *    are both of 4 double-words (16 bytes) in size, even
4888 	 *    though all frames are stored in the first 4G of mem
4889 	 *    range and the higher 32-bits of the address are always 0.
4890 	 */
4891 	int			i, j, k, l, frames, sgemax;
4892 	int			temp;
4893 	uint8_t			chainflags;
4894 	uint32_t		chainlength;
4895 	mptsas_cache_frames_t	*p;
4896 
4897 	/*
4898 	 * Sgemax is the number of SGE's that will fit
4899 	 * in each extra frame and frames is the total
4900 	 * number of frames we'll need.  One sge entry per
4901 	 * frame is reserved for the chain element, thus the -1 below.
4902 	 */
4903 	sgemax = ((mpt->m_req_frame_size / sizeof (MPI2_IEEE_SGE_SIMPLE64))
4904 	    - 1);
4905 	temp = (cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) / sgemax;
4906 
4907 	/*
4908 	 * A little check to see if we need to round up the number
4909 	 * of frames we need
4910 	 */
4911 	if ((cookiec - (MPTSAS_MAX_FRAME_SGES64(mpt) - 1)) - (temp *
4912 	    sgemax) > 1) {
4913 		frames = (temp + 1);
4914 	} else {
4915 		frames = temp;
4916 	}
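	/*
	 * Illustration (hypothetical values): for this calculation the only
	 * difference from the example in mptsas_sge_chain() is the 16-byte
	 * IEEE simple element, so with m_req_frame_size == 128,
	 * sgemax == 128 / 16 - 1 == 7; for cookiec == 40 and
	 * MPTSAS_MAX_FRAME_SGES64(mpt) == 9, temp == (40 - 8) / 7 == 4 and
	 * frames rounds up to 5.
	 */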
4917 	NDBG1(("mptsas_ieee_sge_chain: temp=%d, frames=%d", temp, frames));
4918 	dmap = cmd->cmd_sg;
4919 	ieeesge = (pMpi2IeeeSgeSimple64_t)(&frame->SGL);
4920 
4921 	/*
4922 	 * First fill in the main frame
4923 	 */
4924 	j = MPTSAS_MAX_FRAME_SGES64(mpt) - 1;
4925 	mptsas_ieee_sge_mainframe(cmd, frame, acc_hdl, j, 0);
4926 	dmap += j;
4927 	ieeesge += j;
4928 	j++;
4929 
4930 	/*
4931 	 * Fill in the chain element in the main frame.
4932 	 * About calculation on ChainOffset:
4933 	 * 1. Struct msg_scsi_io_request has 4 double-words (16 bytes)
4934 	 *    in the end reserved for SGL element storage
4935 	 *    (MPI2_SGE_IO_UNION); we should count it in our
4936 	 *    calculation.  See its definition in the header file.
4937 	 * 2. Constant j is the counter of the current SGL element
4938 	 *    that will be processed, and (j - 1) is the number of
4939 	 *    SGL elements that have been processed (stored in the
4940 	 *    main frame).
4941 	 * 3. ChainOffset value should be in units of quad-words (16
4942 	 *    bytes) so the last value should be divided by 16.
4943 	 */
4944 	ddi_put8(acc_hdl, &frame->ChainOffset,
4945 	    (sizeof (MPI2_SCSI_IO_REQUEST) -
4946 	    sizeof (MPI2_SGE_IO_UNION) +
4947 	    (j - 1) * sizeof (MPI2_IEEE_SGE_SIMPLE64)) >> 4);
4948 	ieeesgechain = (pMpi25IeeeSgeChain64_t)ieeesge;
4949 	chainflags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
4950 	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
4951 	ddi_put8(acc_hdl, &ieeesgechain->Flags, chainflags);
4952 
4953 	/*
4954 	 * The size of the next frame is the exact amount of space
4955 	 * (in bytes) used to store the SGL elements. j is the counter
4956 	 * of SGL elements. (j - 1) is the number of SGL elements that
4957 	 * have been processed (stored in frames).
4958 	 */
4959 	if (frames >= 2) {
4960 		ASSERT(mpt->m_req_frame_size >=
4961 		    sizeof (MPI2_IEEE_SGE_SIMPLE64));
4962 		chainlength = mpt->m_req_frame_size /
4963 		    sizeof (MPI2_IEEE_SGE_SIMPLE64) *
4964 		    sizeof (MPI2_IEEE_SGE_SIMPLE64);
4965 	} else {
4966 		chainlength = ((cookiec - (j - 1)) *
4967 		    sizeof (MPI2_IEEE_SGE_SIMPLE64));
4968 	}
4969 
4970 	p = cmd->cmd_extra_frames;
4971 
4972 	ddi_put32(acc_hdl, &ieeesgechain->Length, chainlength);
4973 	ddi_put32(acc_hdl, &ieeesgechain->Address.Low, p->m_phys_addr);
4974 	ddi_put32(acc_hdl, &ieeesgechain->Address.High, p->m_phys_addr >> 32);
4975 
4976 	/*
4977 	 * If there are more than 2 frames left we have to
4978 	 * fill in the next chain offset to the location of
4979 	 * the chain element in the next frame.
4980 	 * sgemax is the number of simple elements in an extra
4981 	 * frame. Note that for IEEE SGEs the value NextChainOffset
4982 	 * is in units of 16 bytes, hence the shift by 4 below.
4983 	 */
4984 	if (frames >= 2) {
4985 		ddi_put8(acc_hdl, &ieeesgechain->NextChainOffset,
4986 		    (sgemax * sizeof (MPI2_IEEE_SGE_SIMPLE64)) >> 4);
4987 	} else {
4988 		ddi_put8(acc_hdl, &ieeesgechain->NextChainOffset, 0);
4989 	}
4990 
4991 	/*
4992 	 * Jump to next frame;
4993 	 * Starting here, chain buffers go into the per command SGL.
4994 	 * This buffer is allocated when chain buffers are needed.
4995 	 */
4996 	ieeesge = (pMpi2IeeeSgeSimple64_t)p->m_frames_addr;
4997 	i = cookiec;
4998 
4999 	/*
5000 	 * Start filling in frames with SGE's.  If we
5001 	 * reach the end of frame and still have SGE's
5002 	 * to fill we need to add a chain element and
5003 	 * use another frame.  j will be our counter
5004 	 * for what cookie we are at and i will be
5005 	 * the total cookiec. k is the current frame
5006 	 */
5007 	for (k = 1; k <= frames; k++) {
5008 		for (l = 1; (l <= (sgemax + 1)) && (j <= i); j++, l++) {
5009 
5010 			/*
5011 			 * If we have reached the end of frame
5012 			 * and we have more SGE's to fill in
5013 			 * we have to fill the final entry
5014 			 * with a chain element and then
5015 			 * continue to the next frame
5016 			 */
5017 			if ((l == (sgemax + 1)) && (k != frames)) {
5018 				ieeesgechain = (pMpi25IeeeSgeChain64_t)ieeesge;
5019 				j--;
5020 				chainflags =
5021 				    MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
5022 				    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
5023 				ddi_put8(p->m_acc_hdl,
5024 				    &ieeesgechain->Flags, chainflags);
5025 				/*
5026 				 * k is the frame counter and (k + 1)
5027 				 * is the number of the next frame.
5028 				 * Note that frames are in contiguous
5029 				 * memory space.
5030 				 */
5031 				nframe_phys_addr = p->m_phys_addr +
5032 				    (mpt->m_req_frame_size * k);
5033 				ddi_put32(p->m_acc_hdl,
5034 				    &ieeesgechain->Address.Low,
5035 				    nframe_phys_addr);
5036 				ddi_put32(p->m_acc_hdl,
5037 				    &ieeesgechain->Address.High,
5038 				    nframe_phys_addr >> 32);
5039 
5040 				/*
5041 				 * If there are more than 2 frames left
5042 				 * we have to set the next chain offset to
5043 				 * the location of the chain element
5044 				 * in the next frame and fill in the
5045 				 * length of the next chain
5046 				 */
5047 				if ((frames - k) >= 2) {
5048 					ddi_put8(p->m_acc_hdl,
5049 					    &ieeesgechain->NextChainOffset,
5050 					    (sgemax *
5051 					    sizeof (MPI2_IEEE_SGE_SIMPLE64))
5052 					    >> 4);
5053 					ASSERT(mpt->m_req_frame_size >=
5054 					    sizeof (MPI2_IEEE_SGE_SIMPLE64));
5055 					ddi_put32(p->m_acc_hdl,
5056 					    &ieeesgechain->Length,
5057 					    mpt->m_req_frame_size /
5058 					    sizeof (MPI2_IEEE_SGE_SIMPLE64) *
5059 					    sizeof (MPI2_IEEE_SGE_SIMPLE64));
5060 				} else {
5061 					/*
5062 					 * This is the last frame. Set
5063 					 * the NextChainOffset to 0 and
5064 					 * Length is the total size of
5065 					 * all remaining simple elements
5066 					 */
5067 					ddi_put8(p->m_acc_hdl,
5068 					    &ieeesgechain->NextChainOffset,
5069 					    0);
5070 					ddi_put32(p->m_acc_hdl,
5071 					    &ieeesgechain->Length,
5072 					    (cookiec - j) *
5073 					    sizeof (MPI2_IEEE_SGE_SIMPLE64));
5074 				}
5075 
5076 				/* Jump to the next frame */
5077 				ieeesge = (pMpi2IeeeSgeSimple64_t)
5078 				    ((char *)p->m_frames_addr +
5079 				    (int)mpt->m_req_frame_size * k);
5080 
5081 				continue;
5082 			}
5083 
5084 			ddi_put32(p->m_acc_hdl,
5085 			    &ieeesge->Address.Low,
5086 			    dmap->addr.address64.Low);
5087 			ddi_put32(p->m_acc_hdl,
5088 			    &ieeesge->Address.High,
5089 			    dmap->addr.address64.High);
5090 			ddi_put32(p->m_acc_hdl,
5091 			    &ieeesge->Length, dmap->count);
5092 			flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
5093 			    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
5094 
5095 			/*
5096 			 * If we are at the end of the frame and
5097 			 * there is another frame to fill in
5098 			 * do we need to do anything?
5099 			 * if ((l == sgemax) && (k != frames)) {
5100 			 * }
5101 			 */
5102 
5103 			/*
5104 			 * If this is the final cookie set end of list.
5105 			 */
5106 			if (j == i) {
5107 				flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
5108 			}
5109 
5110 			ddi_put8(p->m_acc_hdl, &ieeesge->Flags, flags);
5111 			dmap++;
5112 			ieeesge++;
5113 		}
5114 	}
5115 
5116 	/*
5117 	 * Sync DMA with the chain buffers that were just created
5118 	 */
5119 	(void) ddi_dma_sync(p->m_dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
5120 }
5121 
5122 static void
5123 mptsas_sge_setup(mptsas_t *mpt, mptsas_cmd_t *cmd, uint32_t *control,
5124     pMpi2SCSIIORequest_t frame, ddi_acc_handle_t acc_hdl)
5125 {
5126 	ASSERT(cmd->cmd_flags & CFLAG_DMAVALID);
5127 
5128 	NDBG1(("mptsas_sge_setup: cookiec=%d", cmd->cmd_cookiec));
5129 
5130 	/*
5131 	 * Set read/write bit in control.
5132 	 */
5133 	if (cmd->cmd_flags & CFLAG_DMASEND) {
5134 		*control |= MPI2_SCSIIO_CONTROL_WRITE;
5135 	} else {
5136 		*control |= MPI2_SCSIIO_CONTROL_READ;
5137 	}
5138 
5139 	ddi_put32(acc_hdl, &frame->DataLength, cmd->cmd_dmacount);
5140 
5141 	/*
5142 	 * We have four cases here: all of the SG elements either fit into
5143 	 * the main frame or they do not, and for each of those the SG
5144 	 * element format differs between the MPI2 and the MPI2.5 (IEEE)
5145 	 * interfaces.
5146 	 * If we have more cookies than we can attach to a frame
5147 	 * we will need to use a chain element to point to
5148 	 * a location in memory where the rest of the S/G
5149 	 * elements reside.
5150 	 */
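	/*
	 * In summary, the dispatch below is:
	 *   cookies fit in main frame, MPI2.5 -> mptsas_ieee_sge_mainframe()
	 *   cookies fit in main frame, MPI2.0 -> mptsas_sge_mainframe()
	 *   cookies need chaining,     MPI2.5 -> mptsas_ieee_sge_chain()
	 *   cookies need chaining,     MPI2.0 -> mptsas_sge_chain()
	 */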
5151 	if (cmd->cmd_cookiec <= MPTSAS_MAX_FRAME_SGES64(mpt)) {
5152 		if (mpt->m_MPI25) {
5153 			mptsas_ieee_sge_mainframe(cmd, frame, acc_hdl,
5154 			    cmd->cmd_cookiec,
5155 			    MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
5156 		} else {
5157 			mptsas_sge_mainframe(cmd, frame, acc_hdl,
5158 			    cmd->cmd_cookiec,
5159 			    ((uint32_t)(MPI2_SGE_FLAGS_LAST_ELEMENT
5160 			    | MPI2_SGE_FLAGS_END_OF_BUFFER
5161 			    | MPI2_SGE_FLAGS_END_OF_LIST) <<
5162 			    MPI2_SGE_FLAGS_SHIFT));
5163 		}
5164 	} else {
5165 		if (mpt->m_MPI25) {
5166 			mptsas_ieee_sge_chain(mpt, cmd, frame, acc_hdl);
5167 		} else {
5168 			mptsas_sge_chain(mpt, cmd, frame, acc_hdl);
5169 		}
5170 	}
5171 }
5172 
5173 /*
5174  * Interrupt handling
5175  * Utility routine.  Poll for status of a command sent to HBA
5176  * without interrupts (a FLAG_NOINTR command).
5177  */
5178 int
5179 mptsas_poll(mptsas_t *mpt, mptsas_cmd_t *poll_cmd, int polltime)
5180 {
5181 	int	rval = TRUE;
5182 
5183 	NDBG5(("mptsas_poll: cmd=0x%p", (void *)poll_cmd));
5184 
5185 	if ((poll_cmd->cmd_flags & CFLAG_TM_CMD) == 0) {
5186 		mptsas_restart_hba(mpt);
5187 	}
5188 
5189 	/*
5190 	 * Wait, using drv_usecwait(), long enough for the command to
5191 	 * reasonably return from the target if the target isn't
5192 	 * "dead".  A polled command may well be sent from scsi_poll, and
5193 	 * there are retries built in to scsi_poll if the transport
5194 	 * accepted the packet (TRAN_ACCEPT).  scsi_poll waits 1 second
5195 	 * and retries the transport up to scsi_poll_busycnt times
5196 	 * (currently 60) if
5197 	 * 1. pkt_reason is CMD_INCOMPLETE and pkt_state is 0, or
5198 	 * 2. pkt_reason is CMD_CMPLT and *pkt_scbp has STATUS_BUSY
5199 	 *
5200 	 * Limit the waiting to avoid a hang in the event that the
5201 	 * cmd never gets started but we are still receiving interrupts.
5202 	 */
5203 	while (!(poll_cmd->cmd_flags & CFLAG_FINISHED)) {
5204 		if (mptsas_wait_intr(mpt, polltime) == FALSE) {
5205 			NDBG5(("mptsas_poll: command incomplete"));
5206 			rval = FALSE;
5207 			break;
5208 		}
5209 	}
5210 
5211 	if (rval == FALSE) {
5212 
5213 		/*
5214 		 * This isn't supposed to happen; the HBA must be wedged.
5215 		 * Mark this cmd as a timeout.
5216 		 */
5217 		mptsas_set_pkt_reason(mpt, poll_cmd, CMD_TIMEOUT,
5218 		    (STAT_TIMEOUT|STAT_ABORTED));
5219 
5220 		if (poll_cmd->cmd_queued == FALSE) {
5221 
5222 			NDBG5(("mptsas_poll: not on waitq"));
5223 
5224 			poll_cmd->cmd_pkt->pkt_state |=
5225 			    (STATE_GOT_BUS|STATE_GOT_TARGET|STATE_SENT_CMD);
5226 		} else {
5227 
5228 			/* find and remove it from the waitq */
5229 			NDBG5(("mptsas_poll: delete from waitq"));
5230 			mptsas_waitq_delete(mpt, poll_cmd);
5231 		}
5232 
5233 	}
5234 	mptsas_fma_check(mpt, poll_cmd);
5235 	NDBG5(("mptsas_poll: done"));
5236 	return (rval);
5237 }
5238 
5239 /*
5240  * Used for polling cmds and TM function
5241  */
5242 static int
5243 mptsas_wait_intr(mptsas_t *mpt, int polltime)
5244 {
5245 	int				cnt;
5246 	pMpi2ReplyDescriptorsUnion_t	reply_desc_union;
5247 	uint32_t			int_mask;
5248 
5249 	NDBG5(("mptsas_wait_intr"));
5250 
5251 	mpt->m_polled_intr = 1;
5252 
5253 	/*
5254 	 * Get the current interrupt mask and disable interrupts.  When
5255 	 * re-enabling ints, set mask to saved value.
5256 	 */
5257 	int_mask = mptsas_hirrd(mpt, &mpt->m_reg->HostInterruptMask);
5258 	MPTSAS_DISABLE_INTR(mpt);
5259 
5260 	/*
5261 	 * Keep polling for at least (polltime * 1000) microseconds
5262 	 */
5263 	for (cnt = 0; cnt < polltime; cnt++) {
5264 		(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
5265 		    DDI_DMA_SYNC_FORCPU);
5266 
5267 		reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
5268 		    MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
5269 
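		/*
		 * An all-ones descriptor is the initialized/unused pattern,
		 * meaning the IOC has not posted a new reply here yet.
		 */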
5270 		if (ddi_get32(mpt->m_acc_post_queue_hdl,
5271 		    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
5272 		    ddi_get32(mpt->m_acc_post_queue_hdl,
5273 		    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
5274 			drv_usecwait(1000);
5275 			continue;
5276 		}
5277 
5278 		/*
5279 		 * The reply is valid, process it according to its
5280 		 * type.
5281 		 */
5282 		mptsas_process_intr(mpt, reply_desc_union);
5283 
5284 		if (++mpt->m_post_index == mpt->m_post_queue_depth) {
5285 			mpt->m_post_index = 0;
5286 		}
5287 
5288 		/*
5289 		 * Update the global reply index
5290 		 */
5291 		ddi_put32(mpt->m_datap,
5292 		    &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
5293 		mpt->m_polled_intr = 0;
5294 
5295 		/*
5296 		 * Re-enable interrupts and quit.
5297 		 */
5298 		ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask,
5299 		    int_mask);
5300 		return (TRUE);
5301 
5302 	}
5303 
5304 	/*
5305 	 * Clear polling flag, re-enable interrupts and quit.
5306 	 */
5307 	mpt->m_polled_intr = 0;
5308 	ddi_put32(mpt->m_datap, &mpt->m_reg->HostInterruptMask, int_mask);
5309 	return (FALSE);
5310 }
5311 
5312 static void
5313 mptsas_handle_scsi_io_success(mptsas_t *mpt,
5314     pMpi2ReplyDescriptorsUnion_t reply_desc)
5315 {
5316 	pMpi2SCSIIOSuccessReplyDescriptor_t	scsi_io_success;
5317 	uint16_t				SMID;
5318 	mptsas_slots_t				*slots = mpt->m_active;
5319 	mptsas_cmd_t				*cmd = NULL;
5320 	struct scsi_pkt				*pkt;
5321 
5322 	ASSERT(mutex_owned(&mpt->m_mutex));
5323 
5324 	scsi_io_success = (pMpi2SCSIIOSuccessReplyDescriptor_t)reply_desc;
5325 	SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &scsi_io_success->SMID);
5326 
5327 	/*
5328 	 * This is a success reply so just complete the IO.  First, do a sanity
5329 	 * check on the SMID.  The final slot is used for TM requests, which
5330 	 * would not come into this reply handler.
5331 	 */
5332 	if ((SMID == 0) || (SMID > slots->m_n_normal)) {
5333 		mptsas_log(mpt, CE_WARN, "?Received invalid SMID of %d\n",
5334 		    SMID);
5335 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
5336 		return;
5337 	}
5338 
5339 	cmd = slots->m_slot[SMID];
5340 
5341 	/*
5342 	 * print warning and return if the slot is empty
5343 	 */
5344 	if (cmd == NULL) {
5345 		mptsas_log(mpt, CE_WARN, "?NULL command for successful SCSI IO "
5346 		    "in slot %d", SMID);
5347 		return;
5348 	}
5349 
5350 	pkt = CMD2PKT(cmd);
5351 	pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
5352 	    STATE_GOT_STATUS);
5353 	if (cmd->cmd_flags & CFLAG_DMAVALID) {
5354 		pkt->pkt_state |= STATE_XFERRED_DATA;
5355 	}
5356 	pkt->pkt_resid = 0;
5357 
5358 	if (cmd->cmd_flags & CFLAG_PASSTHRU) {
5359 		cmd->cmd_flags |= CFLAG_FINISHED;
5360 		cv_broadcast(&mpt->m_passthru_cv);
5361 		return;
5362 	} else {
5363 		mptsas_remove_cmd(mpt, cmd);
5364 	}
5365 
5366 	if (cmd->cmd_flags & CFLAG_RETRY) {
5367 		/*
5368 		 * The target returned QFULL or busy, do not add this
5369 		 * pkt to the doneq since the hba will retry
5370 		 * this cmd.
5371 		 *
5372 		 * The pkt has already been resubmitted in
5373 		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
5374 		 * Remove this cmd_flag here.
5375 		 */
5376 		cmd->cmd_flags &= ~CFLAG_RETRY;
5377 	} else {
5378 		mptsas_doneq_add(mpt, cmd);
5379 	}
5380 }
5381 
5382 static void
5383 mptsas_handle_address_reply(mptsas_t *mpt,
5384     pMpi2ReplyDescriptorsUnion_t reply_desc)
5385 {
5386 	pMpi2AddressReplyDescriptor_t	address_reply;
5387 	pMPI2DefaultReply_t		reply;
5388 	mptsas_fw_diagnostic_buffer_t	*pBuffer;
5389 	uint32_t			reply_addr, reply_frame_dma_baseaddr;
5390 	uint16_t			SMID, iocstatus;
5391 	mptsas_slots_t			*slots = mpt->m_active;
5392 	mptsas_cmd_t			*cmd = NULL;
5393 	uint8_t				function, buffer_type;
5394 	m_replyh_arg_t			*args;
5395 	int				reply_frame_no;
5396 
5397 	ASSERT(mutex_owned(&mpt->m_mutex));
5398 
5399 	address_reply = (pMpi2AddressReplyDescriptor_t)reply_desc;
5400 	reply_addr = ddi_get32(mpt->m_acc_post_queue_hdl,
5401 	    &address_reply->ReplyFrameAddress);
5402 	SMID = ddi_get16(mpt->m_acc_post_queue_hdl, &address_reply->SMID);
5403 
5404 	/*
5405 	 * If reply frame is not in the proper range we should ignore this
5406 	 * message and exit the interrupt handler.
5407 	 */
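	/*
	 * The address is only valid if it lies within
	 * [base, base + frame_size * max_replies) and is aligned to a
	 * reply frame boundary within that region.
	 */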
5408 	reply_frame_dma_baseaddr = mpt->m_reply_frame_dma_addr & 0xffffffffu;
5409 	if ((reply_addr < reply_frame_dma_baseaddr) ||
5410 	    (reply_addr >= (reply_frame_dma_baseaddr +
5411 	    (mpt->m_reply_frame_size * mpt->m_max_replies))) ||
5412 	    ((reply_addr - reply_frame_dma_baseaddr) %
5413 	    mpt->m_reply_frame_size != 0)) {
5414 		mptsas_log(mpt, CE_WARN, "?Received invalid reply frame "
5415 		    "address 0x%x\n", reply_addr);
5416 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
5417 		return;
5418 	}
5419 
5420 	(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
5421 	    DDI_DMA_SYNC_FORCPU);
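	/*
	 * Convert the reply frame's DMA address into a kernel virtual
	 * address by applying its offset within the reply frame pool.
	 */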
5422 	reply = (pMPI2DefaultReply_t)(mpt->m_reply_frame + (reply_addr -
5423 	    reply_frame_dma_baseaddr));
5424 	function = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->Function);
5425 
5426 	NDBG31(("mptsas_handle_address_reply: function 0x%x, reply_addr=0x%x",
5427 	    function, reply_addr));
5428 
5429 	/*
5430 	 * don't get slot information and command for events since these values
5431 	 * don't exist
5432 	 */
5433 	if ((function != MPI2_FUNCTION_EVENT_NOTIFICATION) &&
5434 	    (function != MPI2_FUNCTION_DIAG_BUFFER_POST)) {
5435 		/*
5436 		 * This could be a TM reply, which uses the last allocated SMID,
5437 		 * so allow for that.
5438 		 */
5439 		if ((SMID == 0) || (SMID > (slots->m_n_normal + 1))) {
5440 			mptsas_log(mpt, CE_WARN, "?Received invalid SMID of "
5441 			    "%d\n", SMID);
5442 			ddi_fm_service_impact(mpt->m_dip,
5443 			    DDI_SERVICE_UNAFFECTED);
5444 			return;
5445 		}
5446 
5447 		cmd = slots->m_slot[SMID];
5448 
5449 		/*
5450 		 * print warning and return if the slot is empty
5451 		 */
5452 		if (cmd == NULL) {
5453 			mptsas_log(mpt, CE_WARN, "?NULL command for address "
5454 			    "reply in slot %d", SMID);
5455 			return;
5456 		}
5457 		if ((cmd->cmd_flags &
5458 		    (CFLAG_PASSTHRU | CFLAG_CONFIG | CFLAG_FW_DIAG))) {
5459 			cmd->cmd_rfm = reply_addr;
5460 			cmd->cmd_flags |= CFLAG_FINISHED;
5461 			cv_broadcast(&mpt->m_passthru_cv);
5462 			cv_broadcast(&mpt->m_config_cv);
5463 			cv_broadcast(&mpt->m_fw_diag_cv);
5464 			return;
5465 		} else if (!(cmd->cmd_flags & CFLAG_FW_CMD)) {
5466 			mptsas_remove_cmd(mpt, cmd);
5467 		}
5468 		NDBG31(("\t\tmptsas_process_intr: slot=%d", SMID));
5469 	}
5470 	/*
5471 	 * Depending on the function, we need to handle
5472 	 * the reply frame (and cmd) differently.
5473 	 */
5474 	switch (function) {
5475 	case MPI2_FUNCTION_SCSI_IO_REQUEST:
5476 		mptsas_check_scsi_io_error(mpt, (pMpi2SCSIIOReply_t)reply, cmd);
5477 		break;
5478 	case MPI2_FUNCTION_SCSI_TASK_MGMT:
5479 		cmd->cmd_rfm = reply_addr;
5480 		mptsas_check_task_mgt(mpt, (pMpi2SCSIManagementReply_t)reply,
5481 		    cmd);
5482 		break;
5483 	case MPI2_FUNCTION_FW_DOWNLOAD:
5484 		cmd->cmd_flags |= CFLAG_FINISHED;
5485 		cv_signal(&mpt->m_fw_cv);
5486 		break;
5487 	case MPI2_FUNCTION_EVENT_NOTIFICATION:
5488 		reply_frame_no = (reply_addr - reply_frame_dma_baseaddr) /
5489 		    mpt->m_reply_frame_size;
5490 		args = &mpt->m_replyh_args[reply_frame_no];
5491 		args->mpt = (void *)mpt;
5492 		args->rfm = reply_addr;
5493 
5494 		/*
5495 		 * Record the event if its type is enabled in
5496 		 * this mpt instance by ioctl.
5497 		 */
5498 		mptsas_record_event(args);
5499 
5500 		/*
5501 		 * Handle time critical events
5502 		 * NOT_RESPONDING/ADDED only now
5503 		 */
5504 		if (mptsas_handle_event_sync(args) == DDI_SUCCESS) {
5505 			/*
5506 			 * Do not return to the main code path here; let the
5507 			 * taskq resolve the ack action, and the ack will be
5508 			 * sent from the taskq thread.
5509 			 */
5510 			NDBG20(("send mptsas_handle_event_sync success"));
5511 		}
5512 
5513 		if (mpt->m_in_reset) {
5514 			NDBG20(("dropping event received during reset"));
5515 			return;
5516 		}
5517 
5518 		if ((ddi_taskq_dispatch(mpt->m_event_taskq, mptsas_handle_event,
5519 		    (void *)args, DDI_NOSLEEP)) != DDI_SUCCESS) {
5520 			mptsas_log(mpt, CE_WARN, "No memory available "
5521 			    "for dispatch taskq");
5522 			/*
5523 			 * Return the reply frame to the free queue.
5524 			 */
5525 			ddi_put32(mpt->m_acc_free_queue_hdl,
5526 			    &((uint32_t *)(void *)
5527 			    mpt->m_free_queue)[mpt->m_free_index], reply_addr);
5528 			(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
5529 			    DDI_DMA_SYNC_FORDEV);
5530 			if (++mpt->m_free_index == mpt->m_free_queue_depth) {
5531 				mpt->m_free_index = 0;
5532 			}
5533 
5534 			ddi_put32(mpt->m_datap,
5535 			    &mpt->m_reg->ReplyFreeHostIndex, mpt->m_free_index);
5536 		}
5537 		return;
5538 	case MPI2_FUNCTION_DIAG_BUFFER_POST:
5539 		/*
5540 		 * If SMID is 0, this implies that the reply is due to a
5541 		 * release function with a status that the buffer has been
5542 		 * released.  Set the buffer flags accordingly.
5543 		 */
5544 		if (SMID == 0) {
5545 			iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
5546 			    &reply->IOCStatus);
5547 			buffer_type = ddi_get8(mpt->m_acc_reply_frame_hdl,
5548 			    &(((pMpi2DiagBufferPostReply_t)reply)->BufferType));
5549 			if (iocstatus == MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) {
5550 				pBuffer =
5551 				    &mpt->m_fw_diag_buffer_list[buffer_type];
5552 				pBuffer->valid_data = TRUE;
5553 				pBuffer->owned_by_firmware = FALSE;
5554 				pBuffer->immediate = FALSE;
5555 			}
5556 		} else {
5557 			/*
5558 			 * Normal handling of diag post reply with SMID.
5559 			 */
5560 			cmd = slots->m_slot[SMID];
5561 
5562 			/*
5563 			 * print warning and return if the slot is empty
5564 			 */
5565 			if (cmd == NULL) {
5566 				mptsas_log(mpt, CE_WARN, "?NULL command for "
5567 				    "address reply in slot %d", SMID);
5568 				return;
5569 			}
5570 			cmd->cmd_rfm = reply_addr;
5571 			cmd->cmd_flags |= CFLAG_FINISHED;
5572 			cv_broadcast(&mpt->m_fw_diag_cv);
5573 		}
5574 		return;
5575 	default:
5576 		mptsas_log(mpt, CE_WARN, "Unknown function 0x%x ", function);
5577 		break;
5578 	}
5579 
5580 	/*
5581 	 * Return the reply frame to the free queue.
5582 	 */
5583 	ddi_put32(mpt->m_acc_free_queue_hdl,
5584 	    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
5585 	    reply_addr);
5586 	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
5587 	    DDI_DMA_SYNC_FORDEV);
5588 	if (++mpt->m_free_index == mpt->m_free_queue_depth) {
5589 		mpt->m_free_index = 0;
5590 	}
5591 	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
5592 	    mpt->m_free_index);
5593 
5594 	if (cmd->cmd_flags & CFLAG_FW_CMD)
5595 		return;
5596 
5597 	if (cmd->cmd_flags & CFLAG_RETRY) {
5598 		/*
5599 		 * The target returned QFULL or busy, do not add this
5600 		 * pkt to the doneq since the hba will retry
5601 		 * this cmd.
5602 		 *
5603 		 * The pkt has already been resubmitted in
5604 		 * mptsas_handle_qfull() or in mptsas_check_scsi_io_error().
5605 		 * Remove this cmd_flag here.
5606 		 */
5607 		cmd->cmd_flags &= ~CFLAG_RETRY;
5608 	} else {
5609 		mptsas_doneq_add(mpt, cmd);
5610 	}
5611 }
5612 
5613 #ifdef MPTSAS_DEBUG
5614 static uint8_t mptsas_last_sense[256];
5615 #endif
5616 
5617 static void
5618 mptsas_check_scsi_io_error(mptsas_t *mpt, pMpi2SCSIIOReply_t reply,
5619     mptsas_cmd_t *cmd)
5620 {
5621 	uint8_t			scsi_status, scsi_state;
5622 	uint16_t		ioc_status, cmd_rqs_len;
5623 	uint32_t		xferred, sensecount, responsedata, loginfo = 0;
5624 	struct scsi_pkt		*pkt;
5625 	struct scsi_arq_status	*arqstat;
5626 	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
5627 	uint8_t			*sensedata = NULL;
5628 	uint64_t		sas_wwn;
5629 	uint8_t			phy;
5630 	char			wwn_str[MPTSAS_WWN_STRLEN];
5631 
5632 	scsi_status = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIStatus);
5633 	ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5634 	scsi_state = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->SCSIState);
5635 	xferred = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->TransferCount);
5636 	sensecount = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->SenseCount);
5637 	responsedata = ddi_get32(mpt->m_acc_reply_frame_hdl,
5638 	    &reply->ResponseInfo);
5639 
5640 	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
5641 		sas_wwn = ptgt->m_addr.mta_wwn;
5642 		phy = ptgt->m_phynum;
5643 		if (sas_wwn == 0) {
5644 			(void) sprintf(wwn_str, "p%x", phy);
5645 		} else {
5646 			(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
5647 		}
5648 		loginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
5649 		    &reply->IOCLogInfo);
5650 		mptsas_log(mpt, CE_NOTE,
5651 		    "?Log info 0x%x received for target %d %s.\n"
5652 		    "\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
5653 		    loginfo, Tgt(cmd), wwn_str, scsi_status, ioc_status,
5654 		    scsi_state);
5655 	}
5656 
5657 	NDBG31(("\t\tscsi_status=0x%x, ioc_status=0x%x, scsi_state=0x%x",
5658 	    scsi_status, ioc_status, scsi_state));
5659 
5660 	pkt = CMD2PKT(cmd);
5661 	*(pkt->pkt_scbp) = scsi_status;
5662 
5663 	if (loginfo == 0x31170000) {
5664 		/*
5665 		 * If loginfo PL_LOGINFO_CODE_IO_DEVICE_MISSING_DELAY_RETRY
5666 		 * (0x31170000) is reported, the device missing delay is in
5667 		 * progress and the command needs to be retried later.
5668 		 */
5669 		*(pkt->pkt_scbp) = STATUS_BUSY;
5670 		return;
5671 	}
5672 
5673 	if ((scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) &&
5674 	    ((ioc_status & MPI2_IOCSTATUS_MASK) ==
5675 	    MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE)) {
5676 		pkt->pkt_reason = CMD_INCOMPLETE;
5677 		pkt->pkt_state |= STATE_GOT_BUS;
5678 		if (ptgt->m_reset_delay == 0) {
5679 			mptsas_set_throttle(mpt, ptgt,
5680 			    DRAIN_THROTTLE);
5681 		}
5682 		return;
5683 	}
5684 
5685 	if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
5686 		responsedata &= 0x000000FF;
5687 		if (responsedata & MPTSAS_SCSI_RESPONSE_CODE_TLR_OFF) {
5688 			mptsas_log(mpt, CE_NOTE, "TLR is not supported\n");
5689 			pkt->pkt_reason = CMD_TLR_OFF;
5690 			return;
5691 		}
5692 	}
5693 
5694 
5695 	switch (scsi_status) {
5696 	case MPI2_SCSI_STATUS_CHECK_CONDITION:
5697 		pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
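		/*
		 * Build the auto request sense (ARQ) status in the space
		 * provided by pkt_scbp so the target driver sees both the
		 * SCSI status and the returned sense data.
		 */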
5698 		arqstat = (void*)(pkt->pkt_scbp);
5699 		arqstat->sts_rqpkt_status = *((struct scsi_status *)
5700 		    (pkt->pkt_scbp));
5701 		pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
5702 		    STATE_SENT_CMD | STATE_GOT_STATUS | STATE_ARQ_DONE);
5703 		if (cmd->cmd_flags & CFLAG_XARQ) {
5704 			pkt->pkt_state |= STATE_XARQ_DONE;
5705 		}
5706 		if (pkt->pkt_resid != cmd->cmd_dmacount) {
5707 			pkt->pkt_state |= STATE_XFERRED_DATA;
5708 		}
5709 		arqstat->sts_rqpkt_reason = pkt->pkt_reason;
5710 		arqstat->sts_rqpkt_state  = pkt->pkt_state;
5711 		arqstat->sts_rqpkt_state |= STATE_XFERRED_DATA;
5712 		arqstat->sts_rqpkt_statistics = pkt->pkt_statistics;
5713 		sensedata = (uint8_t *)&arqstat->sts_sensedata;
5714 		cmd_rqs_len = cmd->cmd_extrqslen ?
5715 		    cmd->cmd_extrqslen : cmd->cmd_rqslen;
5716 		(void) ddi_dma_sync(mpt->m_dma_req_sense_hdl, 0, 0,
5717 		    DDI_DMA_SYNC_FORKERNEL);
5718 #ifdef MPTSAS_DEBUG
5719 		bcopy(cmd->cmd_arq_buf, mptsas_last_sense,
5720 		    ((cmd_rqs_len >= sizeof (mptsas_last_sense)) ?
5721 		    sizeof (mptsas_last_sense):cmd_rqs_len));
5722 #endif
5723 		bcopy((uchar_t *)cmd->cmd_arq_buf, sensedata,
5724 		    ((cmd_rqs_len >= sensecount) ? sensecount :
5725 		    cmd_rqs_len));
5726 		arqstat->sts_rqpkt_resid = (cmd_rqs_len - sensecount);
5727 		cmd->cmd_flags |= CFLAG_CMDARQ;
5728 		/*
5729 		 * Set proper status for pkt if autosense was valid
5730 		 */
5731 		if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
5732 			struct scsi_status zero_status = { 0 };
5733 			arqstat->sts_rqpkt_status = zero_status;
5734 		}
5735 
5736 		/*
5737 		 * ASC=0x47 is parity error
5738 		 * ASC=0x48 is initiator detected error received
5739 		 */
5740 		if ((scsi_sense_key(sensedata) == KEY_ABORTED_COMMAND) &&
5741 		    ((scsi_sense_asc(sensedata) == 0x47) ||
5742 		    (scsi_sense_asc(sensedata) == 0x48))) {
5743 			mptsas_log(mpt, CE_NOTE, "Aborted_command!");
5744 		}
5745 
5746 		/*
5747 		 * ASC/ASCQ=0x3F/0x0E means report_luns data changed
5748 		 * ASC/ASCQ=0x25/0x00 means invalid lun
5749 		 */
5750 		if (((scsi_sense_key(sensedata) == KEY_UNIT_ATTENTION) &&
5751 		    (scsi_sense_asc(sensedata) == 0x3F) &&
5752 		    (scsi_sense_ascq(sensedata) == 0x0E)) ||
5753 		    ((scsi_sense_key(sensedata) == KEY_ILLEGAL_REQUEST) &&
5754 		    (scsi_sense_asc(sensedata) == 0x25) &&
5755 		    (scsi_sense_ascq(sensedata) == 0x00))) {
5756 			mptsas_topo_change_list_t *topo_node = NULL;
5757 
5758 			topo_node = kmem_zalloc(
5759 			    sizeof (mptsas_topo_change_list_t),
5760 			    KM_NOSLEEP);
5761 			if (topo_node == NULL) {
5762 				mptsas_log(mpt, CE_NOTE, "No memory "
5763 				    "resource to handle SAS dynamic "
5764 				    "reconfigure.\n");
5765 				break;
5766 			}
5767 			topo_node->mpt = mpt;
5768 			topo_node->event = MPTSAS_DR_EVENT_RECONFIG_TARGET;
5769 			topo_node->un.phymask = ptgt->m_addr.mta_phymask;
5770 			topo_node->devhdl = ptgt->m_devhdl;
5771 			topo_node->object = (void *)ptgt;
5772 			topo_node->flags = MPTSAS_TOPO_FLAG_LUN_ASSOCIATED;
5773 
5774 			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
5775 			    mptsas_handle_dr,
5776 			    (void *)topo_node,
5777 			    DDI_NOSLEEP)) != DDI_SUCCESS) {
5778 				kmem_free(topo_node,
5779 				    sizeof (mptsas_topo_change_list_t));
5780 				mptsas_log(mpt, CE_NOTE, "mptsas start taskq "
5781 				    "for SAS dynamic reconfigure "
5782 				    "failed.\n");
5783 			}
5784 		}
5785 		break;
5786 	case MPI2_SCSI_STATUS_GOOD:
5787 		switch (ioc_status & MPI2_IOCSTATUS_MASK) {
5788 		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
5789 			pkt->pkt_reason = CMD_DEV_GONE;
5790 			pkt->pkt_state |= STATE_GOT_BUS;
5791 			if (ptgt->m_reset_delay == 0) {
5792 				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5793 			}
5794 			NDBG31(("lost disk for target%d, command:%x",
5795 			    Tgt(cmd), pkt->pkt_cdbp[0]));
5796 			break;
5797 		case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
5798 			NDBG31(("data overrun: xferred=%d", xferred));
5799 			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
5800 			pkt->pkt_reason = CMD_DATA_OVR;
5801 			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
5802 			    | STATE_SENT_CMD | STATE_GOT_STATUS
5803 			    | STATE_XFERRED_DATA);
5804 			pkt->pkt_resid = 0;
5805 			break;
5806 		case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
5807 		case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
5808 			NDBG31(("data underrun: xferred=%d", xferred));
5809 			NDBG31(("dmacount=%d", cmd->cmd_dmacount));
5810 			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET
5811 			    | STATE_SENT_CMD | STATE_GOT_STATUS);
5812 			pkt->pkt_resid = (cmd->cmd_dmacount - xferred);
5813 			if (pkt->pkt_resid != cmd->cmd_dmacount) {
5814 				pkt->pkt_state |= STATE_XFERRED_DATA;
5815 			}
5816 			break;
5817 		case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
5818 			if (cmd->cmd_active_expiration <= gethrtime()) {
5819 				/*
5820 				 * When timeout requested, propagate
5821 				 * proper reason and statistics to
5822 				 * target drivers.
5823 				 */
5824 				mptsas_set_pkt_reason(mpt, cmd, CMD_TIMEOUT,
5825 				    STAT_BUS_RESET | STAT_TIMEOUT);
5826 			} else {
5827 				mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
5828 				    STAT_BUS_RESET);
5829 			}
5830 			break;
5831 		case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
5832 		case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
5833 			mptsas_set_pkt_reason(mpt,
5834 			    cmd, CMD_RESET, STAT_DEV_RESET);
5835 			break;
5836 		case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
5837 		case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
5838 			pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET);
5839 			mptsas_set_pkt_reason(mpt,
5840 			    cmd, CMD_TERMINATED, STAT_TERMINATED);
5841 			break;
5842 		case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
5843 		case MPI2_IOCSTATUS_BUSY:
5844 			/*
5845 			 * set throttles to drain
5846 			 */
5847 			for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
5848 			    ptgt = refhash_next(mpt->m_targets, ptgt)) {
5849 				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
5850 			}
5851 
5852 			/*
5853 			 * retry command
5854 			 */
5855 			cmd->cmd_flags |= CFLAG_RETRY;
5856 			cmd->cmd_pkt_flags |= FLAG_HEAD;
5857 
5858 			(void) mptsas_accept_pkt(mpt, cmd);
5859 			break;
5860 		default:
5861 			mptsas_log(mpt, CE_WARN,
5862 			    "unknown ioc_status = %x\n", ioc_status);
5863 			mptsas_log(mpt, CE_CONT, "scsi_state = %x, transfer "
5864 			    "count = %x, scsi_status = %x", scsi_state,
5865 			    xferred, scsi_status);
5866 			break;
5867 		}
5868 		break;
5869 	case MPI2_SCSI_STATUS_TASK_SET_FULL:
5870 		mptsas_handle_qfull(mpt, cmd);
5871 		break;
5872 	case MPI2_SCSI_STATUS_BUSY:
5873 		NDBG31(("scsi_status busy received"));
5874 		break;
5875 	case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
5876 		NDBG31(("scsi_status reservation conflict received"));
5877 		break;
5878 	default:
5879 		mptsas_log(mpt, CE_WARN, "scsi_status=%x, ioc_status=%x\n",
5880 		    scsi_status, ioc_status);
5881 		mptsas_log(mpt, CE_WARN,
5882 		    "mptsas_process_intr: invalid scsi status\n");
5883 		break;
5884 	}
5885 }
5886 
5887 static void
5888 mptsas_check_task_mgt(mptsas_t *mpt, pMpi2SCSIManagementReply_t reply,
5889     mptsas_cmd_t *cmd)
5890 {
5891 	uint8_t		task_type;
5892 	uint16_t	ioc_status;
5893 	uint32_t	log_info;
5894 	uint16_t	dev_handle;
5895 	struct scsi_pkt *pkt = CMD2PKT(cmd);
5896 
5897 	task_type = ddi_get8(mpt->m_acc_reply_frame_hdl, &reply->TaskType);
5898 	ioc_status = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->IOCStatus);
5899 	log_info = ddi_get32(mpt->m_acc_reply_frame_hdl, &reply->IOCLogInfo);
5900 	dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl, &reply->DevHandle);
5901 
5902 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
5903 		mptsas_log(mpt, CE_WARN, "mptsas_check_task_mgt: Task 0x%x "
5904 		    "failed. IOCStatus=0x%x IOCLogInfo=0x%x target=%d\n",
5905 		    task_type, ioc_status, log_info, dev_handle);
5906 		pkt->pkt_reason = CMD_INCOMPLETE;
5907 		return;
5908 	}
5909 
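	/*
	 * For task management functions that reset a target or logical unit,
	 * any commands outstanding to that device must be flushed below
	 * (unless the DevHandle is invalid).
	 */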
5910 	switch (task_type) {
5911 	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
5912 	case MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET:
5913 	case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
5914 	case MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA:
5915 	case MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET:
5916 	case MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION:
5917 		break;
5918 	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
5919 	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
5920 	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
5921 		/*
5922 		 * Check for invalid DevHandle of 0 in case application
5923 		 * sends bad command.  DevHandle of 0 could cause problems.
5924 		 */
5925 		if (dev_handle == 0) {
5926 			mptsas_log(mpt, CE_WARN, "!Can't flush target with"
5927 			    " DevHandle of 0.");
5928 		} else {
5929 			mptsas_flush_target(mpt, dev_handle, Lun(cmd),
5930 			    task_type);
5931 		}
5932 		break;
5933 	default:
5934 		mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
5935 		    task_type);
5936 		mptsas_log(mpt, CE_WARN, "ioc status = %x", ioc_status);
5937 		break;
5938 	}
5939 }
5940 
5941 static void
5942 mptsas_doneq_thread(mptsas_doneq_thread_arg_t *arg)
5943 {
5944 	mptsas_t			*mpt = arg->mpt;
5945 	uint64_t			t = arg->t;
5946 	mptsas_cmd_t			*cmd;
5947 	struct scsi_pkt			*pkt;
5948 	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];
5949 
5950 	mutex_enter(&item->mutex);
5951 	while (item->flag & MPTSAS_DONEQ_THREAD_ACTIVE) {
5952 		if (!item->doneq) {
5953 			cv_wait(&item->cv, &item->mutex);
5954 		}
5955 		pkt = NULL;
5956 		if ((cmd = mptsas_doneq_thread_rm(mpt, t)) != NULL) {
5957 			cmd->cmd_flags |= CFLAG_COMPLETED;
5958 			pkt = CMD2PKT(cmd);
5959 		}
5960 		mutex_exit(&item->mutex);
5961 		if (pkt) {
5962 			mptsas_pkt_comp(pkt, cmd);
5963 		}
5964 		mutex_enter(&item->mutex);
5965 	}
5966 	mutex_exit(&item->mutex);
5967 	mutex_enter(&mpt->m_doneq_mutex);
5968 	mpt->m_doneq_thread_n--;
5969 	cv_broadcast(&mpt->m_doneq_thread_cv);
5970 	mutex_exit(&mpt->m_doneq_mutex);
5971 }
5972 
5973 
5974 /*
5975  * mpt interrupt handler.
5976  */
5977 static uint_t
5978 mptsas_intr(caddr_t arg1, caddr_t arg2)
5979 {
5980 	mptsas_t			*mpt = (void *)arg1;
5981 	pMpi2ReplyDescriptorsUnion_t	reply_desc_union;
5982 	uchar_t				did_reply = FALSE;
5983 
5984 	NDBG1(("mptsas_intr: arg1 0x%p arg2 0x%p", (void *)arg1, (void *)arg2));
5985 
5986 	mutex_enter(&mpt->m_mutex);
5987 
5988 	/*
5989 	 * If interrupts are shared by two channels then check whether this
5990 	 * interrupt is genuinely for this channel by first making sure the
5991 	 * chip is in high power state.
5992 	 */
5993 	if ((mpt->m_options & MPTSAS_OPT_PM) &&
5994 	    (mpt->m_power_level != PM_LEVEL_D0)) {
5995 		mutex_exit(&mpt->m_mutex);
5996 		return (DDI_INTR_UNCLAIMED);
5997 	}
5998 
5999 	/*
6000 	 * If polling, interrupt was triggered by some shared interrupt because
6001 	 * IOC interrupts are disabled during polling, so polling routine will
6002 	 * handle any replies.  Considering this, if polling is happening,
6003 	 * return with interrupt unclaimed.
6004 	 */
6005 	if (mpt->m_polled_intr) {
6006 		mutex_exit(&mpt->m_mutex);
6007 		mptsas_log(mpt, CE_WARN, "mpt_sas: Unclaimed interrupt");
6008 		return (DDI_INTR_UNCLAIMED);
6009 	}
6010 
6011 	/*
6012 	 * Read the istat register.
6013 	 */
6014 	if ((INTPENDING(mpt)) != 0) {
6015 		/*
6016 		 * read fifo until empty.
6017 		 */
6018 #ifndef __lock_lint
6019 		_NOTE(CONSTCOND)
6020 #endif
6021 		while (TRUE) {
6022 			(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
6023 			    DDI_DMA_SYNC_FORCPU);
6024 			reply_desc_union = (pMpi2ReplyDescriptorsUnion_t)
6025 			    MPTSAS_GET_NEXT_REPLY(mpt, mpt->m_post_index);
6026 
6027 			if (ddi_get32(mpt->m_acc_post_queue_hdl,
6028 			    &reply_desc_union->Words.Low) == 0xFFFFFFFF ||
6029 			    ddi_get32(mpt->m_acc_post_queue_hdl,
6030 			    &reply_desc_union->Words.High) == 0xFFFFFFFF) {
6031 				break;
6032 			}
6033 
6034 			/*
6035 			 * The reply is valid, process it according to its
6036 			 * type.  Also, set a flag for updating the reply index
6037 			 * after they've all been processed.
6038 			 */
6039 			did_reply = TRUE;
6040 
6041 			mptsas_process_intr(mpt, reply_desc_union);
6042 
6043 			/*
6044 			 * Increment post index and roll over if needed.
6045 			 */
6046 			if (++mpt->m_post_index == mpt->m_post_queue_depth) {
6047 				mpt->m_post_index = 0;
6048 			}
6049 		}
6050 
6051 		/*
6052 		 * Update the global reply index if at least one reply was
6053 		 * processed.
6054 		 */
6055 		if (did_reply) {
6056 			ddi_put32(mpt->m_datap,
6057 			    &mpt->m_reg->ReplyPostHostIndex, mpt->m_post_index);
6058 		}
6059 	} else {
6060 		mutex_exit(&mpt->m_mutex);
6061 		return (DDI_INTR_UNCLAIMED);
6062 	}
6063 	NDBG1(("mptsas_intr complete"));
6064 
6065 	/*
6066 	 * If no helper threads are created, process the doneq in ISR. If
6067 	 * helpers are created, use the doneq length as a metric to measure the
6068 	 * load on the interrupt CPU. If it is long enough, which indicates the
6069 	 * load is heavy, then we deliver the IO completions to the helpers.
6070 	 * This measurement has some limitations, although it is simple and
6071 	 * straightforward and works well for most of the cases at present.
6072 	 */
6073 	if (!mpt->m_doneq_thread_n ||
6074 	    (mpt->m_doneq_len <= mpt->m_doneq_length_threshold)) {
6075 		mptsas_doneq_empty(mpt);
6076 	} else {
6077 		mptsas_deliver_doneq_thread(mpt);
6078 	}
6079 
6080 	/*
6081 	 * If there are queued cmd, start them now.
6082 	 */
6083 	if (mpt->m_waitq != NULL) {
6084 		mptsas_restart_waitq(mpt);
6085 	}
6086 
6087 	mutex_exit(&mpt->m_mutex);
6088 	return (DDI_INTR_CLAIMED);
6089 }
6090 
6091 static void
6092 mptsas_process_intr(mptsas_t *mpt,
6093     pMpi2ReplyDescriptorsUnion_t reply_desc_union)
6094 {
6095 	uint8_t	reply_type;
6096 
6097 	ASSERT(mutex_owned(&mpt->m_mutex));
6098 
6099 	/*
6100 	 * The reply is valid, process it according to its
6101 	 * type.  Also, set a flag for updating the reply index
6102 	 * after they've all been processed.
6103 	 */
6104 	reply_type = ddi_get8(mpt->m_acc_post_queue_hdl,
6105 	    &reply_desc_union->Default.ReplyFlags);
6106 	reply_type &= MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
6107 	if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
6108 	    reply_type == MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS) {
6109 		mptsas_handle_scsi_io_success(mpt, reply_desc_union);
6110 	} else if (reply_type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
6111 		mptsas_handle_address_reply(mpt, reply_desc_union);
6112 	} else {
6113 		mptsas_log(mpt, CE_WARN, "?Bad reply type %x", reply_type);
6114 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
6115 	}
6116 
6117 	/*
6118 	 * Clear the reply descriptor for re-use and increment
6119 	 * index.
6120 	 */
6121 	ddi_put64(mpt->m_acc_post_queue_hdl,
6122 	    &((uint64_t *)(void *)mpt->m_post_queue)[mpt->m_post_index],
6123 	    0xFFFFFFFFFFFFFFFF);
6124 	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
6125 	    DDI_DMA_SYNC_FORDEV);
6126 }
6127 
6128 /*
6129  * handle qfull condition
6130  */
6131 static void
6132 mptsas_handle_qfull(mptsas_t *mpt, mptsas_cmd_t *cmd)
6133 {
6134 	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;
6135 
6136 	if ((++cmd->cmd_qfull_retries > ptgt->m_qfull_retries) ||
6137 	    (ptgt->m_qfull_retries == 0)) {
6138 		/*
6139 		 * We have exhausted the retries on QFULL, or,
6140 		 * the target driver has indicated that it
6141 		 * wants to handle QFULL itself by setting
6142 		 * qfull-retries capability to 0. In either case
6143 		 * we want the target driver's QFULL handling
6144 		 * to kick in. We do this by having pkt_reason
6145 		 * as CMD_CMPLT and pkt_scbp as STATUS_QFULL.
6146 		 */
6147 		mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
6148 	} else {
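		/*
		 * Lower the throttle to just below the number of commands
		 * currently outstanding so no new commands are started until
		 * the target has drained part of its queue.
		 */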
6149 		if (ptgt->m_reset_delay == 0) {
6150 			ptgt->m_t_throttle =
6151 			    max((ptgt->m_t_ncmds - 2), 0);
6152 		}
6153 
6154 		cmd->cmd_pkt_flags |= FLAG_HEAD;
6155 		cmd->cmd_flags &= ~(CFLAG_TRANFLAG);
6156 		cmd->cmd_flags |= CFLAG_RETRY;
6157 
6158 		(void) mptsas_accept_pkt(mpt, cmd);
6159 
6160 		/*
6161 		 * When the target gives queue full status with no commands
6162 		 * outstanding (m_t_ncmds == 0), throttle is set to 0
6163 		 * (HOLD_THROTTLE) and the queue full handling starts
6164 		 * (see psarc/1994/313); if there are commands outstanding,
6165 		 * throttle is set to (m_t_ncmds - 2).
6166 		 */
6167 		if (ptgt->m_t_throttle == HOLD_THROTTLE) {
6168 			/*
6169 			 * By setting throttle to QFULL_THROTTLE, we
6170 			 * avoid submitting new commands and in
6171 			 * mptsas_restart_cmd find out slots which need
6172 			 * their throttles to be cleared.
6173 			 */
6174 			mptsas_set_throttle(mpt, ptgt, QFULL_THROTTLE);
6175 			if (mpt->m_restart_cmd_timeid == 0) {
6176 				mpt->m_restart_cmd_timeid =
6177 				    timeout(mptsas_restart_cmd, mpt,
6178 				    ptgt->m_qfull_retry_interval);
6179 			}
6180 		}
6181 	}
6182 }
6183 
6184 mptsas_phymask_t
6185 mptsas_physport_to_phymask(mptsas_t *mpt, uint8_t physport)
6186 {
6187 	mptsas_phymask_t	phy_mask = 0;
6188 	uint8_t			i = 0;
6189 
6190 	NDBG20(("mptsas%d physport_to_phymask enter", mpt->m_instance));
6191 
6192 	ASSERT(mutex_owned(&mpt->m_mutex));
6193 
6194 	/*
6195 	 * If physport is 0xFF, this is a RAID volume.  Use phymask of 0.
6196 	 */
6197 	if (physport == 0xFF) {
6198 		return (0);
6199 	}
6200 
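	/*
	 * Find a PHY with an attached device on this physical port and
	 * return the phymask previously computed for that port.
	 */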
6201 	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
6202 		if (mpt->m_phy_info[i].attached_devhdl &&
6203 		    (mpt->m_phy_info[i].phy_mask != 0) &&
6204 		    (mpt->m_phy_info[i].port_num == physport)) {
6205 			phy_mask = mpt->m_phy_info[i].phy_mask;
6206 			break;
6207 		}
6208 	}
6209 	NDBG20(("mptsas%d physport_to_phymask:physport :%x phymask :%x, ",
6210 	    mpt->m_instance, physport, phy_mask));
6211 	return (phy_mask);
6212 }
6213 
6214 /*
6215  * Free a device handle after the device is gone, by use of a passthrough request
6216  */
6217 static int
6218 mptsas_free_devhdl(mptsas_t *mpt, uint16_t devhdl)
6219 {
6220 	Mpi2SasIoUnitControlRequest_t	req;
6221 	Mpi2SasIoUnitControlReply_t	rep;
6222 	int				ret;
6223 
6224 	ASSERT(mutex_owned(&mpt->m_mutex));
6225 
6226 	/*
6227 	 * Need to compose a SAS IO Unit Control request message
6228 	 * and call mptsas_do_passthru() function
6229 	 */
6230 	bzero(&req, sizeof (req));
6231 	bzero(&rep, sizeof (rep));
6232 
6233 	req.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
6234 	req.Operation = MPI2_SAS_OP_REMOVE_DEVICE;
6235 	req.DevHandle = LE_16(devhdl);
6236 
6237 	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
6238 	    sizeof (req), sizeof (rep), 0, MPTSAS_PASS_THRU_DIRECTION_NONE,
6239 	    NULL, 0, 60, FKIOCTL);
6240 	if (ret != 0) {
6241 		cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
6242 		    "Control error %d", ret);
6243 		return (DDI_FAILURE);
6244 	}
6245 
6246 	/* do passthrough success, check the ioc status */
6247 	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
6248 		cmn_err(CE_WARN, "mptsas_free_devhdl: passthru SAS IO Unit "
6249 		    "Control IOCStatus %d", LE_16(rep.IOCStatus));
6250 		return (DDI_FAILURE);
6251 	}
6252 
6253 	return (DDI_SUCCESS);
6254 }
6255 
6256 /*
6257  * We have a SATA target that has changed, which means the "bridge-port"
6258  * property must be updated to reflect the SAS WWN of the new attachment point.
6259  * This may change if a SATA device changes which bay, and therefore phy, it is
6260  * plugged into. This SATA device may be a multipath virtual device or may be a
6261  * physical device. We have to handle both cases.
6262  */
6263 static boolean_t
6264 mptsas_update_sata_bridge(mptsas_t *mpt, dev_info_t *parent,
6265     mptsas_target_t *ptgt)
6266 {
6267 	int			rval;
6268 	uint16_t		dev_hdl;
6269 	uint16_t		pdev_hdl;
6270 	uint64_t		dev_sas_wwn;
6271 	uint8_t			physport;
6272 	uint8_t			phy_id;
6273 	uint32_t		page_address;
6274 	uint16_t		bay_num, enclosure, io_flags;
6275 	uint32_t		dev_info;
6276 	char			uabuf[SCSI_WWN_BUFLEN];
6277 	dev_info_t		*dip;
6278 	mdi_pathinfo_t		*pip;
6279 
6280 	mutex_enter(&mpt->m_mutex);
6281 	page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
6282 	    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)ptgt->m_devhdl;
6283 	rval = mptsas_get_sas_device_page0(mpt, page_address, &dev_hdl,
6284 	    &dev_sas_wwn, &dev_info, &physport, &phy_id, &pdev_hdl, &bay_num,
6285 	    &enclosure, &io_flags);
6286 	mutex_exit(&mpt->m_mutex);
6287 	if (rval != DDI_SUCCESS) {
6288 		mptsas_log(mpt, CE_WARN, "unable to get SAS page 0 for "
6289 		    "handle %d", page_address);
6290 		return (B_FALSE);
6291 	}
6292 
6293 	if (scsi_wwn_to_wwnstr(dev_sas_wwn, 1, uabuf) == NULL) {
6294 		mptsas_log(mpt, CE_WARN,
6295 		    "mptsas unable to format SATA bridge WWN");
6296 		return (B_FALSE);
6297 	}
6298 
6299 	if (mpt->m_mpxio_enable == TRUE && (pip = mptsas_find_path_addr(parent,
6300 	    ptgt->m_addr.mta_wwn, 0)) != NULL) {
6301 		if (mdi_prop_update_string(pip, SCSI_ADDR_PROP_BRIDGE_PORT,
6302 		    uabuf) != DDI_SUCCESS) {
6303 			mptsas_log(mpt, CE_WARN,
6304 			    "mptsas unable to create SCSI bridge port "
6305 			    "property for SATA device");
6306 			return (B_FALSE);
6307 		}
6308 		return (B_TRUE);
6309 	}
6310 
6311 	if ((dip = mptsas_find_child_addr(parent, ptgt->m_addr.mta_wwn,
6312 	    0)) != NULL) {
6313 		if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
6314 		    SCSI_ADDR_PROP_BRIDGE_PORT, uabuf) != DDI_PROP_SUCCESS) {
6315 			mptsas_log(mpt, CE_WARN,
6316 			    "mptsas unable to create SCSI bridge port "
6317 			    "property for SATA device");
6318 			return (B_FALSE);
6319 		}
6320 		return (B_TRUE);
6321 	}
6322 
6323 	mptsas_log(mpt, CE_WARN, "mptsas failed to find dev_info_t or "
6324 	    "mdi_pathinfo_t for target with WWN %016" PRIx64,
6325 	    ptgt->m_addr.mta_wwn);
6326 
6327 	return (B_FALSE);
6328 }
6329 
6330 static void
6331 mptsas_update_phymask(mptsas_t *mpt)
6332 {
6333 	mptsas_phymask_t mask = 0, phy_mask;
6334 	char		*phy_mask_name;
6335 	uint8_t		current_port;
6336 	int		i, j;
6337 
6338 	NDBG20(("mptsas%d update phymask ", mpt->m_instance));
6339 
6340 	ASSERT(mutex_owned(&mpt->m_mutex));
6341 
6342 	(void) mptsas_get_sas_io_unit_page(mpt);
6343 
6344 	phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
6345 
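	/*
	 * Walk every PHY: PHYs sharing a port number are grouped into a
	 * single phymask, and an iport is registered for each distinct mask.
	 */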
6346 	for (i = 0; i < mpt->m_num_phys; i++) {
6347 		phy_mask = 0x00;
6348 
6349 		if (mpt->m_phy_info[i].attached_devhdl == 0)
6350 			continue;
6351 
6352 		bzero(phy_mask_name, MPTSAS_MAX_PHYS);
6353 
6354 		current_port = mpt->m_phy_info[i].port_num;
6355 
6356 		if ((mask & (1 << i)) != 0)
6357 			continue;
6358 
6359 		for (j = 0; j < mpt->m_num_phys; j++) {
6360 			if (mpt->m_phy_info[j].attached_devhdl &&
6361 			    (mpt->m_phy_info[j].port_num == current_port)) {
6362 				phy_mask |= (1 << j);
6363 			}
6364 		}
6365 		mask = mask | phy_mask;
6366 
6367 		for (j = 0; j < mpt->m_num_phys; j++) {
6368 			if ((phy_mask >> j) & 0x01) {
6369 				mpt->m_phy_info[j].phy_mask = phy_mask;
6370 			}
6371 		}
6372 
6373 		(void) sprintf(phy_mask_name, "%x", phy_mask);
6374 
6375 		mutex_exit(&mpt->m_mutex);
6376 		/*
6377 		 * Register an iport; if the port already exists,
6378 		 * SCSA will do nothing and just return.
6379 		 */
6380 		(void) scsi_hba_iport_register(mpt->m_dip, phy_mask_name);
6381 		mutex_enter(&mpt->m_mutex);
6382 	}
6383 	kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
6384 	NDBG20(("mptsas%d update phymask return", mpt->m_instance));
6385 }
6386 
6387 /*
6388  * mptsas_handle_dr is a task handler for DR; the DR actions include:
6389  * 1. Directly Attached Device Added/Removed.
6390  * 2. Expander Device Added/Removed.
6391  * 3. Indirectly Attached Device (behind an Expander) Added/Removed.
6392  * 4. Status change of LUNs of an existing device.
6393  * 5. RAID volume created/deleted.
6394  * 6. Member of RAID volume is released because of RAID deletion.
6395  * 7. Physical disks are removed because of RAID creation.
6396  */
6397 static void
6398 mptsas_handle_dr(void *args)
6399 {
6400 	mptsas_topo_change_list_t	*topo_node = NULL;
6401 	mptsas_topo_change_list_t	*save_node = NULL;
6402 	mptsas_t			*mpt;
6403 	dev_info_t			*parent = NULL;
6404 	mptsas_phymask_t		phymask = 0;
6405 	char				*phy_mask_name;
6406 	uint8_t				flags = 0, physport = 0xff;
6407 	uint8_t				port_update = 0;
6408 	uint_t				event;
6409 
6410 	topo_node = (mptsas_topo_change_list_t *)args;
6411 
6412 	mpt = topo_node->mpt;
6413 	event = topo_node->event;
6414 	flags = topo_node->flags;
6415 
6416 	phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
6417 
6418 	NDBG20(("mptsas%d handle_dr enter", mpt->m_instance));
6419 
6420 	switch (event) {
6421 	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
6422 		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6423 		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE) ||
6424 		    (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
6425 			/*
6426 			 * Direct attached or expander attached device added
6427 			 * into system or a Phys Disk that is being unhidden.
6428 			 */
6429 			port_update = 1;
6430 		}
6431 		break;
6432 	case MPTSAS_DR_EVENT_RECONFIG_SMP:
6433 		/*
6434 		 * A new expander was added to the system; it must be the head
6435 		 * of the topo_change_list_t.
6436 		 */
6437 		port_update = 1;
6438 		break;
6439 	default:
6440 		port_update = 0;
6441 		break;
6442 	}
6443 	/*
6444 	 * Any case where port_update == 1 may cause the initiator port form to change
6445 	 */
6446 	mutex_enter(&mpt->m_mutex);
6447 	if (mpt->m_port_chng && port_update) {
6448 		/*
6449 		 * The mpt->m_port_chng flag indicates that some PHYs of the
6450 		 * initiator port have come online. So when an expander-added
6451 		 * or directly-attached-device-online event arrives, we force
6452 		 * an update of the port information by issuing a SAS IO Unit
6453 		 * Page request and updating the PHYMASKs.
6454 		 */
6455 		(void) mptsas_update_phymask(mpt);
6456 		mpt->m_port_chng = 0;
6457 
6458 	}
6459 	mutex_exit(&mpt->m_mutex);
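	/*
	 * Walk the topology change list, locating the parent iport for each
	 * node, applying the change, and then freeing the node.
	 */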
6460 	while (topo_node) {
6461 		phymask = 0;
6462 		if (parent == NULL) {
6463 			physport = topo_node->un.physport;
6464 			event = topo_node->event;
6465 			flags = topo_node->flags;
6466 			if (event & (MPTSAS_DR_EVENT_OFFLINE_TARGET |
6467 			    MPTSAS_DR_EVENT_OFFLINE_SMP)) {
6468 				/*
6469 				 * For all offline events, phymask is known
6470 				 */
6471 				phymask = topo_node->un.phymask;
6472 				goto find_parent;
6473 			}
6474 			if (event & MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
6475 				goto handle_topo_change;
6476 			}
6477 			if (flags & MPTSAS_TOPO_FLAG_LUN_ASSOCIATED) {
6478 				phymask = topo_node->un.phymask;
6479 				goto find_parent;
6480 			}
6481 
6482 			if ((flags ==
6483 			    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) &&
6484 			    (event == MPTSAS_DR_EVENT_RECONFIG_TARGET)) {
6485 				/*
6486 				 * There is no field in the IR_CONFIG_CHANGE
6487 				 * event indicating physport/phynum, so get the
6488 				 * parent after the SAS Device Page0 request.
6489 				 */
6490 				goto handle_topo_change;
6491 			}
6492 
6493 			mutex_enter(&mpt->m_mutex);
6494 			if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
6495 				/*
6496 				 * If a direct attached device was added or a
6497 				 * phys disk is being unhidden, the argument
6498 				 * physport is actually the PHY#, so we have to
6499 				 * derive the phymask from the PHY#.
6500 				 */
6501 				physport = mpt->m_phy_info[physport].port_num;
6502 			}
6503 
6504 			/*
6505 			 * Translate physport to phymask so that we can search
6506 			 * parent dip.
6507 			 */
6508 			phymask = mptsas_physport_to_phymask(mpt,
6509 			    physport);
6510 			mutex_exit(&mpt->m_mutex);
6511 
6512 find_parent:
6513 			bzero(phy_mask_name, MPTSAS_MAX_PHYS);
6514 			/*
6515 			 * For RAID topology change node, write the iport name
6516 			 * as v0.
6517 			 */
6518 			if (flags & MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
6519 				(void) sprintf(phy_mask_name, "v0");
6520 			} else {
6521 				/*
6522 				 * phymask can be 0 if the drive has been
6523 				 * pulled by the time an add event is
6524 				 * processed.  If phymask is 0, just skip this
6525 				 * event and continue.
6526 				 */
6527 				if (phymask == 0) {
6528 					mutex_enter(&mpt->m_mutex);
6529 					save_node = topo_node;
6530 					topo_node = topo_node->next;
6531 					ASSERT(save_node);
6532 					kmem_free(save_node,
6533 					    sizeof (mptsas_topo_change_list_t));
6534 					mutex_exit(&mpt->m_mutex);
6535 
6536 					parent = NULL;
6537 					continue;
6538 				}
6539 				(void) sprintf(phy_mask_name, "%x", phymask);
6540 			}
6541 			parent = scsi_hba_iport_find(mpt->m_dip,
6542 			    phy_mask_name);
6543 			if (parent == NULL) {
6544 				mptsas_log(mpt, CE_WARN, "Failed to find an "
6545 				    "iport, should not happen!");
6546 				goto out;
6547 			}
6548 
6549 		}
6550 		ASSERT(parent);
6551 handle_topo_change:
6552 
6553 		mutex_enter(&mpt->m_mutex);
6554 		/*
6555 		 * If HBA is being reset, don't perform operations depending
6556 		 * on the IOC. We must free the topo list, however.
6557 		 */
6558 		if (!mpt->m_in_reset) {
6559 			mptsas_handle_topo_change(topo_node, parent);
6560 		} else {
6561 			NDBG20(("skipping topo change received during reset"));
6562 		}
6563 		save_node = topo_node;
6564 		topo_node = topo_node->next;
6565 		ASSERT(save_node);
6566 		kmem_free(save_node, sizeof (mptsas_topo_change_list_t));
6567 		mutex_exit(&mpt->m_mutex);
6568 
6569 		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6570 		    (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) ||
6571 		    (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED)) {
6572 			/*
6573 			 * If a direct attached device is associated, make sure
6574 			 * to reset the parent before starting the next one.
6575 			 * All devices associated with an expander share the
6576 			 * parent.  Also, reset parent if this is for RAID.
6577 			 */
6578 			parent = NULL;
6579 		}
6580 	}
6581 out:
6582 	kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
6583 }
6584 
6585 static void
6586 mptsas_handle_topo_change(mptsas_topo_change_list_t *topo_node,
6587     dev_info_t *parent)
6588 {
6589 	mptsas_target_t	*ptgt = NULL;
6590 	mptsas_smp_t	*psmp = NULL;
6591 	mptsas_t	*mpt = (void *)topo_node->mpt;
6592 	uint16_t	devhdl;
6593 	uint16_t	attached_devhdl;
6594 	uint64_t	sas_wwn = 0;
6595 	int		rval = 0;
6596 	uint32_t	page_address;
6597 	uint8_t		phy, flags;
6598 	char		*addr = NULL;
6599 	dev_info_t	*lundip;
6600 	char		attached_wwnstr[MPTSAS_WWN_STRLEN];
6601 
6602 	NDBG20(("mptsas%d handle_topo_change enter, devhdl 0x%x, "
6603 	    "event 0x%x, flags 0x%x", mpt->m_instance, topo_node->devhdl,
6604 	    topo_node->event, topo_node->flags));
6605 
6606 	ASSERT(mutex_owned(&mpt->m_mutex));
6607 
6608 	switch (topo_node->event) {
6609 	case MPTSAS_DR_EVENT_RECONFIG_TARGET:
6610 	{
6611 		char *phy_mask_name;
6612 		mptsas_phymask_t phymask = 0;
6613 
6614 		if (topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
6615 			/*
6616 			 * Get latest RAID info.
6617 			 */
6618 			(void) mptsas_get_raid_info(mpt);
6619 			ptgt = refhash_linear_search(mpt->m_targets,
6620 			    mptsas_target_eval_devhdl, &topo_node->devhdl);
6621 			if (ptgt == NULL)
6622 				break;
6623 		} else {
6624 			ptgt = (void *)topo_node->object;
6625 		}
6626 
6627 		if (ptgt == NULL) {
6628 			/*
6629 			 * If a Phys Disk was deleted, RAID info needs to be
6630 			 * updated to reflect the new topology.
6631 			 */
6632 			(void) mptsas_get_raid_info(mpt);
6633 
6634 			/*
6635 			 * Get SAS device page 0 by DevHandle to check whether
6636 			 * an SSP/SATA end device exists.
6637 			 */
6638 			page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
6639 			    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
6640 			    topo_node->devhdl;
6641 
6642 			rval = mptsas_get_target_device_info(mpt, page_address,
6643 			    &devhdl, &ptgt);
6644 			if (rval == DEV_INFO_WRONG_DEVICE_TYPE) {
6645 				mptsas_log(mpt, CE_NOTE,
6646 				    "mptsas_handle_topo_change: target %d is "
6647 				    "not a SAS/SATA device. \n",
6648 				    topo_node->devhdl);
6649 			} else if (rval == DEV_INFO_FAIL_ALLOC) {
6650 				mptsas_log(mpt, CE_NOTE,
6651 				    "mptsas_handle_topo_change: could not "
6652 				    "allocate memory. \n");
6653 			} else if (rval == DEV_INFO_FAIL_GUID) {
6654 				mptsas_log(mpt, CE_NOTE,
6655 				    "mptsas_handle_topo_change: could not "
6656 				    "get SATA GUID for target %d. \n",
6657 				    topo_node->devhdl);
6658 			}
6659 			/*
6660 			 * If rval is DEV_INFO_PHYS_DISK or indicates failure
6661 			 * then there is nothing else to do, just leave.
6662 			 */
6663 			if (rval != DEV_INFO_SUCCESS) {
6664 				return;
6665 			}
6666 		}
6667 
6668 		ASSERT(ptgt->m_devhdl == topo_node->devhdl);
6669 
6670 		mutex_exit(&mpt->m_mutex);
6671 		flags = topo_node->flags;
6672 
6673 		if (flags == MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED) {
6674 			phymask = ptgt->m_addr.mta_phymask;
6675 			phy_mask_name = kmem_zalloc(MPTSAS_MAX_PHYS, KM_SLEEP);
6676 			(void) sprintf(phy_mask_name, "%x", phymask);
6677 			parent = scsi_hba_iport_find(mpt->m_dip,
6678 			    phy_mask_name);
6679 			kmem_free(phy_mask_name, MPTSAS_MAX_PHYS);
6680 			if (parent == NULL) {
6681 				mptsas_log(mpt, CE_WARN, "Failed to find an "
6682 				    "iport for PD, should not happen!");
6683 				mutex_enter(&mpt->m_mutex);
6684 				break;
6685 			}
6686 		}
6687 
6688 		if (flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) {
6689 			ndi_devi_enter(parent);
6690 			(void) mptsas_config_raid(parent, topo_node->devhdl,
6691 			    &lundip);
6692 			ndi_devi_exit(parent);
6693 		} else {
6694 			/*
6695 			 * hold nexus for bus configure
6696 			 */
6697 			ndi_devi_enter(scsi_vhci_dip);
6698 			ndi_devi_enter(parent);
6699 			rval = mptsas_config_target(parent, ptgt);
6700 			/*
6701 			 * release nexus for bus configure
6702 			 */
6703 			ndi_devi_exit(parent);
6704 			ndi_devi_exit(scsi_vhci_dip);
6705 
6706 			/*
6707 			 * If this is a SATA device, make sure that the
6708 			 * bridge-port (the SAS WWN that the SATA device is
6709 			 * plugged into) is updated. This may change if a SATA
6710 			 * device changes which bay, and therefore phy, it is
6711 			 * plugged into.
6712 			 */
6713 			if (IS_SATA_DEVICE(ptgt->m_deviceinfo)) {
6714 				if (!mptsas_update_sata_bridge(mpt, parent,
6715 				    ptgt)) {
6716 					mutex_enter(&mpt->m_mutex);
6717 					return;
6718 				}
6719 			}
6720 
6721 			/*
6722 			 * Add parent's props for SMHBA support
6723 			 */
6724 			if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
6725 				bzero(attached_wwnstr,
6726 				    sizeof (attached_wwnstr));
6727 				(void) sprintf(attached_wwnstr, "w%016"PRIx64,
6728 				    ptgt->m_addr.mta_wwn);
6729 				if (ddi_prop_update_string(DDI_DEV_T_NONE,
6730 				    parent,
6731 				    SCSI_ADDR_PROP_ATTACHED_PORT,
6732 				    attached_wwnstr)
6733 				    != DDI_PROP_SUCCESS) {
6734 					(void) ddi_prop_remove(DDI_DEV_T_NONE,
6735 					    parent,
6736 					    SCSI_ADDR_PROP_ATTACHED_PORT);
6737 					mptsas_log(mpt, CE_WARN, "Failed to"
6738 					    " create attached-port props");
6739 					mutex_enter(&mpt->m_mutex);
6740 					return;
6741 				}
6742 				if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6743 				    MPTSAS_NUM_PHYS, 1) !=
6744 				    DDI_PROP_SUCCESS) {
6745 					(void) ddi_prop_remove(DDI_DEV_T_NONE,
6746 					    parent, MPTSAS_NUM_PHYS);
6747 					mptsas_log(mpt, CE_WARN, "Failed to"
6748 					    " create num-phys props");
6749 					mutex_enter(&mpt->m_mutex);
6750 					return;
6751 				}
6752 
6753 				/*
6754 				 * Update PHY info for smhba
6755 				 */
6756 				mutex_enter(&mpt->m_mutex);
6757 				if (mptsas_smhba_phy_init(mpt)) {
6758 					mptsas_log(mpt, CE_WARN, "mptsas phy"
6759 					    " update failed");
6760 					return;
6761 				}
6762 				mutex_exit(&mpt->m_mutex);
6763 
6764 				/*
6765 				 * topo_node->un.physport is really the PHY#
6766 				 * for direct attached devices
6767 				 */
6768 				mptsas_smhba_set_one_phy_props(mpt, parent,
6769 				    topo_node->un.physport, &attached_devhdl);
6770 
6771 				if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6772 				    MPTSAS_VIRTUAL_PORT, 0) !=
6773 				    DDI_PROP_SUCCESS) {
6774 					(void) ddi_prop_remove(DDI_DEV_T_NONE,
6775 					    parent, MPTSAS_VIRTUAL_PORT);
6776 					mptsas_log(mpt, CE_WARN,
6777 					    "mptsas virtual-port "
6778 					    "prop update failed");
6779 					mutex_enter(&mpt->m_mutex);
6780 					return;
6781 				}
6782 			}
6783 		}
6784 		mutex_enter(&mpt->m_mutex);
6785 
6786 		NDBG20(("mptsas%d handle_topo_change to online devhdl:%x, "
6787 		    "phymask:%x.", mpt->m_instance, ptgt->m_devhdl,
6788 		    ptgt->m_addr.mta_phymask));
6789 		break;
6790 	}
6791 	case MPTSAS_DR_EVENT_OFFLINE_TARGET:
6792 	{
6793 		devhdl = topo_node->devhdl;
6794 		ptgt = refhash_linear_search(mpt->m_targets,
6795 		    mptsas_target_eval_devhdl, &devhdl);
6796 		if (ptgt == NULL)
6797 			break;
6798 
6799 		sas_wwn = ptgt->m_addr.mta_wwn;
6800 		phy = ptgt->m_phynum;
6801 
6802 		addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
6803 
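		/*
		 * Build the unit address: "w<WWN>" for SAS-addressed devices,
		 * or "p<PHY>" for devices addressed by PHY number.
		 */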
6804 		if (sas_wwn) {
6805 			(void) sprintf(addr, "w%016"PRIx64, sas_wwn);
6806 		} else {
6807 			(void) sprintf(addr, "p%x", phy);
6808 		}
6809 		ASSERT(ptgt->m_devhdl == devhdl);
6810 
6811 		if ((topo_node->flags == MPTSAS_TOPO_FLAG_RAID_ASSOCIATED) ||
6812 		    (topo_node->flags ==
6813 		    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED)) {
6814 			/*
6815 			 * Get latest RAID info if RAID volume status changes
6816 			 * or Phys Disk status changes
6817 			 */
6818 			(void) mptsas_get_raid_info(mpt);
6819 		}
6820 		/*
6821 		 * Abort all outstanding command on the device
6822 		 */
6823 		rval = mptsas_do_scsi_reset(mpt, devhdl);
6824 		if (rval) {
6825 			NDBG20(("mptsas%d handle_topo_change to reset target "
6826 			    "before offline devhdl:%x, phymask:%x, rval:%x",
6827 			    mpt->m_instance, ptgt->m_devhdl,
6828 			    ptgt->m_addr.mta_phymask, rval));
6829 		}
6830 
6831 		mutex_exit(&mpt->m_mutex);
6832 
6833 		ndi_devi_enter(scsi_vhci_dip);
6834 		ndi_devi_enter(parent);
6835 		rval = mptsas_offline_target(parent, addr);
6836 		ndi_devi_exit(parent);
6837 		ndi_devi_exit(scsi_vhci_dip);
6838 		NDBG20(("mptsas%d handle_topo_change to offline devhdl:%x, "
6839 		    "phymask:%x, rval:%x", mpt->m_instance,
6840 		    ptgt->m_devhdl, ptgt->m_addr.mta_phymask, rval));
6841 
6842 		kmem_free(addr, SCSI_MAXNAMELEN);
6843 
6844 		/*
6845 		 * Clear parent's props for SMHBA support
6846 		 */
6847 		flags = topo_node->flags;
6848 		if (flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) {
6849 			bzero(attached_wwnstr, sizeof (attached_wwnstr));
6850 			if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
6851 			    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
6852 			    DDI_PROP_SUCCESS) {
6853 				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6854 				    SCSI_ADDR_PROP_ATTACHED_PORT);
6855 				mptsas_log(mpt, CE_WARN, "mptsas attached port "
6856 				    "prop update failed");
6857 				mutex_enter(&mpt->m_mutex);
6858 				break;
6859 			}
6860 			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6861 			    MPTSAS_NUM_PHYS, 0) !=
6862 			    DDI_PROP_SUCCESS) {
6863 				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6864 				    MPTSAS_NUM_PHYS);
6865 				mptsas_log(mpt, CE_WARN, "mptsas num phys "
6866 				    "prop update failed");
6867 				mutex_enter(&mpt->m_mutex);
6868 				break;
6869 			}
6870 			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6871 			    MPTSAS_VIRTUAL_PORT, 1) !=
6872 			    DDI_PROP_SUCCESS) {
6873 				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6874 				    MPTSAS_VIRTUAL_PORT);
6875 				mptsas_log(mpt, CE_WARN, "mptsas virtual port "
6876 				    "prop update failed");
6877 				mutex_enter(&mpt->m_mutex);
6878 				break;
6879 			}
6880 		}
6881 
6882 		mutex_enter(&mpt->m_mutex);
6883 		if (rval == DDI_SUCCESS) {
6884 			refhash_remove(mpt->m_targets, ptgt);
6885 			ptgt = NULL;
6886 		} else {
6887 			/*
6888 			 * Clear the DR_INTRANSITION flag to allow I/O down to
6889 			 * the pHCI driver now that failover has finished, and
6890 			 * invalidate the devhdl.
6891 			 */
6892 			ptgt->m_devhdl = MPTSAS_INVALID_DEVHDL;
6893 			ptgt->m_tgt_unconfigured = 0;
6894 			mutex_enter(&mpt->m_tx_waitq_mutex);
6895 			ptgt->m_dr_flag = MPTSAS_DR_INACTIVE;
6896 			mutex_exit(&mpt->m_tx_waitq_mutex);
6897 		}
6898 
6899 		/*
6900 		 * Send SAS IO Unit Control to free the dev handle
6901 		 */
6902 		if ((flags == MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE) ||
6903 		    (flags == MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE)) {
6904 			rval = mptsas_free_devhdl(mpt, devhdl);
6905 
6906 			NDBG20(("mptsas%d handle_topo_change to remove "
6907 			    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
6908 			    rval));
6909 		}
6910 
6911 		break;
6912 	}
6913 	case MPTSAS_TOPO_FLAG_REMOVE_HANDLE:
6914 	{
6915 		devhdl = topo_node->devhdl;
6916 		/*
6917 		 * If this is the remove handle event, do a reset first.
6918 		 */
6919 		if (topo_node->event == MPTSAS_TOPO_FLAG_REMOVE_HANDLE) {
6920 			rval = mptsas_do_scsi_reset(mpt, devhdl);
6921 			if (rval) {
6922 				NDBG20(("mpt%d reset target before remove "
6923 				    "devhdl:%x, rval:%x", mpt->m_instance,
6924 				    devhdl, rval));
6925 			}
6926 		}
6927 
6928 		/*
6929 		 * Send SAS IO Unit Control to free the dev handle
6930 		 */
6931 		rval = mptsas_free_devhdl(mpt, devhdl);
6932 		NDBG20(("mptsas%d handle_topo_change to remove "
6933 		    "devhdl:%x, rval:%x", mpt->m_instance, devhdl,
6934 		    rval));
6935 		break;
6936 	}
6937 	case MPTSAS_DR_EVENT_RECONFIG_SMP:
6938 	{
6939 		mptsas_smp_t smp;
6940 		dev_info_t *smpdip;
6941 
6942 		devhdl = topo_node->devhdl;
6943 
6944 		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
6945 		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)devhdl;
6946 		rval = mptsas_get_sas_expander_page0(mpt, page_address, &smp);
6947 		if (rval != DDI_SUCCESS) {
6948 			mptsas_log(mpt, CE_WARN, "failed to online smp, "
6949 			    "handle %x", devhdl);
6950 			return;
6951 		}
6952 
6953 		psmp = mptsas_smp_alloc(mpt, &smp);
6954 		if (psmp == NULL) {
6955 			return;
6956 		}
6957 
6958 		mutex_exit(&mpt->m_mutex);
6959 		ndi_devi_enter(parent);
6960 		(void) mptsas_online_smp(parent, psmp, &smpdip);
6961 		ndi_devi_exit(parent);
6962 
6963 		mutex_enter(&mpt->m_mutex);
6964 		break;
6965 	}
6966 	case MPTSAS_DR_EVENT_OFFLINE_SMP:
6967 	{
6968 		uint32_t dev_info;
6969 		devhdl = topo_node->devhdl;
6970 
6971 		psmp = refhash_linear_search(mpt->m_smp_targets,
6972 		    mptsas_smp_eval_devhdl, &devhdl);
6973 		if (psmp == NULL)
6974 			break;
6975 		/*
6976 		 * The mptsas_smp_t data is released only if the dip is offlined
6977 		 * successfully.
6978 		 */
6979 		mutex_exit(&mpt->m_mutex);
6980 
6981 		ndi_devi_enter(parent);
6982 		rval = mptsas_offline_smp(parent, psmp, NDI_DEVI_REMOVE);
6983 		ndi_devi_exit(parent);
6984 
6985 		dev_info = psmp->m_deviceinfo;
6986 		if ((dev_info & DEVINFO_DIRECT_ATTACHED) ==
6987 		    DEVINFO_DIRECT_ATTACHED) {
6988 			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
6989 			    MPTSAS_VIRTUAL_PORT, 1) !=
6990 			    DDI_PROP_SUCCESS) {
6991 				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
6992 				    MPTSAS_VIRTUAL_PORT);
6993 				mptsas_log(mpt, CE_WARN, "mptsas virtual port "
6994 				    "prop update failed");
6995 				mutex_enter(&mpt->m_mutex);
6996 				return;
6997 			}
6998 			/*
6999 			 * The SMP was attached to this iport; clear num-phys.
7000 			 */
7001 			if (ddi_prop_update_int(DDI_DEV_T_NONE, parent,
7002 			    MPTSAS_NUM_PHYS, 0) !=
7003 			    DDI_PROP_SUCCESS) {
7004 				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
7005 				    MPTSAS_NUM_PHYS);
7006 				mptsas_log(mpt, CE_WARN, "mptsas num phys "
7007 				    "prop update failed");
7008 				mutex_enter(&mpt->m_mutex);
7009 				return;
7010 			}
7011 			/*
7012 			 * Clear parent's attached-port props
7013 			 */
7014 			bzero(attached_wwnstr, sizeof (attached_wwnstr));
7015 			if (ddi_prop_update_string(DDI_DEV_T_NONE, parent,
7016 			    SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwnstr) !=
7017 			    DDI_PROP_SUCCESS) {
7018 				(void) ddi_prop_remove(DDI_DEV_T_NONE, parent,
7019 				    SCSI_ADDR_PROP_ATTACHED_PORT);
7020 				mptsas_log(mpt, CE_WARN, "mptsas attached port "
7021 				    "prop update failed");
7022 				mutex_enter(&mpt->m_mutex);
7023 				return;
7024 			}
7025 		}
7026 
7027 		mutex_enter(&mpt->m_mutex);
7028 		NDBG20(("mptsas%d handle_topo_change to remove devhdl:%x, "
7029 		    "rval:%x", mpt->m_instance, psmp->m_devhdl, rval));
7030 		if (rval == DDI_SUCCESS) {
7031 			refhash_remove(mpt->m_smp_targets, psmp);
7032 		} else {
7033 			psmp->m_devhdl = MPTSAS_INVALID_DEVHDL;
7034 		}
7035 
7036 		bzero(attached_wwnstr, sizeof (attached_wwnstr));
7037 
7038 		break;
7039 	}
7040 	default:
7041 		return;
7042 	}
7043 }
7044 
7045 /*
7046  * Record the event if its type is enabled in the mpt instance by ioctl.
7047  */
7048 static void
7049 mptsas_record_event(void *args)
7050 {
7051 	m_replyh_arg_t			*replyh_arg;
7052 	pMpi2EventNotificationReply_t	eventreply;
7053 	uint32_t			event, rfm;
7054 	mptsas_t			*mpt;
7055 	int				i, j;
7056 	uint16_t			event_data_len;
7057 	boolean_t			sendAEN = FALSE;
7058 
7059 	replyh_arg = (m_replyh_arg_t *)args;
7060 	rfm = replyh_arg->rfm;
7061 	mpt = replyh_arg->mpt;
7062 
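	/*
	 * rfm holds the reply frame's 32-bit bus address; convert it to a
	 * kernel virtual address by offsetting from the base of the reply
	 * frame region.
	 */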
7063 	eventreply = (pMpi2EventNotificationReply_t)
7064 	    (mpt->m_reply_frame + (rfm -
7065 	    (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
7066 	event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7067 
7068 
7069 	/*
7070 	 * Generate a system event to let anyone who cares know that a
7071 	 * LOG_ENTRY_ADDED event has occurred.  This is sent no matter what the
7072 	 * event mask is set to.
7073 	 */
7074 	if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
7075 		sendAEN = TRUE;
7076 	}
7077 
7078 	/*
7079 	 * Record the event only if it is not masked.  Determine which dword
7080 	 * and bit of event mask to test.
7081 	 */
7082 	i = (uint8_t)(event / 32);
7083 	j = (uint8_t)(event % 32);
7084 	if ((i < 4) && ((1 << j) & mpt->m_event_mask[i])) {
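		/*
		 * Record the event type, sequence number, and data in the
		 * next slot of the driver's circular event buffer.
		 */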
7085 		i = mpt->m_event_index;
7086 		mpt->m_events[i].Type = event;
7087 		mpt->m_events[i].Number = ++mpt->m_event_number;
7088 		bzero(mpt->m_events[i].Data, MPTSAS_MAX_EVENT_DATA_LENGTH * 4);
7089 		event_data_len = ddi_get16(mpt->m_acc_reply_frame_hdl,
7090 		    &eventreply->EventDataLength);
7091 
7092 		if (event_data_len > 0) {
7093 			/*
7094 			 * Limit data to size in m_event entry
7095 			 */
7096 			if (event_data_len > MPTSAS_MAX_EVENT_DATA_LENGTH) {
7097 				event_data_len = MPTSAS_MAX_EVENT_DATA_LENGTH;
7098 			}
7099 			for (j = 0; j < event_data_len; j++) {
7100 				mpt->m_events[i].Data[j] =
7101 				    ddi_get32(mpt->m_acc_reply_frame_hdl,
7102 				    &(eventreply->EventData[j]));
7103 			}
7104 
7105 			/*
7106 			 * check for index wrap-around
7107 			 */
7108 			if (++i == MPTSAS_EVENT_QUEUE_SIZE) {
7109 				i = 0;
7110 			}
7111 			mpt->m_event_index = (uint8_t)i;
7112 
7113 			/*
7114 			 * Set flag to send the event.
7115 			 */
7116 			sendAEN = TRUE;
7117 		}
7118 	}
7119 
7120 	/*
7121 	 * Generate a system event if flag is set to let anyone who cares know
7122 	 * that an event has occurred.
7123 	 */
7124 	if (sendAEN) {
7125 		(void) ddi_log_sysevent(mpt->m_dip, DDI_VENDOR_LSI, "MPT_SAS",
7126 		    "SAS", NULL, NULL, DDI_NOSLEEP);
7127 	}
7128 }
7129 
7130 #define	SMP_RESET_IN_PROGRESS MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS
7131 /*
7132  * handle sync events from ioc in interrupt
7133  * return value:
7134  * DDI_SUCCESS: The event is handled by this func
7135  * DDI_FAILURE: Event is not handled
7136  */
7137 static int
7138 mptsas_handle_event_sync(void *args)
7139 {
7140 	m_replyh_arg_t			*replyh_arg;
7141 	pMpi2EventNotificationReply_t	eventreply;
7142 	uint32_t			event, rfm;
7143 	mptsas_t			*mpt;
7144 	uint_t				iocstatus;
7145 
7146 	replyh_arg = (m_replyh_arg_t *)args;
7147 	rfm = replyh_arg->rfm;
7148 	mpt = replyh_arg->mpt;
7149 
7150 	ASSERT(mutex_owned(&mpt->m_mutex));
7151 
7152 	eventreply = (pMpi2EventNotificationReply_t)
7153 	    (mpt->m_reply_frame + (rfm -
7154 	    (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
7155 	event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7156 
7157 	if ((iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7158 	    &eventreply->IOCStatus)) != 0) {
7159 		if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7160 			mptsas_log(mpt, CE_WARN,
7161 			    "!mptsas_handle_event_sync: event 0x%x, "
7162 			    "IOCStatus=0x%x, "
7163 			    "IOCLogInfo=0x%x", event, iocstatus,
7164 			    ddi_get32(mpt->m_acc_reply_frame_hdl,
7165 			    &eventreply->IOCLogInfo));
7166 		} else {
7167 			mptsas_log(mpt, CE_WARN,
7168 			    "mptsas_handle_event_sync: event 0x%x, "
7169 			    "IOCStatus=0x%x, "
7170 			    "(IOCLogInfo=0x%x)", event, iocstatus,
7171 			    ddi_get32(mpt->m_acc_reply_frame_hdl,
7172 			    &eventreply->IOCLogInfo));
7173 		}
7174 	}
7175 
7176 	/*
7177 	 * figure out what kind of event we got and handle accordingly
7178 	 */
7179 	switch (event) {
7180 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
7181 	{
7182 		pMpi2EventDataSasTopologyChangeList_t	sas_topo_change_list;
7183 		uint8_t				num_entries, expstatus, phy;
7184 		uint8_t				phystatus, physport, state, i;
7185 		uint8_t				start_phy_num, link_rate;
7186 		uint16_t			dev_handle, reason_code;
7187 		uint16_t			enc_handle, expd_handle;
7188 		char				string[80], curr[80], prev[80];
7189 		mptsas_topo_change_list_t	*topo_head = NULL;
7190 		mptsas_topo_change_list_t	*topo_tail = NULL;
7191 		mptsas_topo_change_list_t	*topo_node = NULL;
7192 		mptsas_target_t			*ptgt;
7193 		mptsas_smp_t			*psmp;
7194 		uint8_t				flags = 0, exp_flag;
7195 		smhba_info_t			*pSmhba = NULL;
7196 
7197 		NDBG20(("mptsas_handle_event_sync: SAS topology change"));
7198 
7199 		sas_topo_change_list = (pMpi2EventDataSasTopologyChangeList_t)
7200 		    eventreply->EventData;
7201 
7202 		enc_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7203 		    &sas_topo_change_list->EnclosureHandle);
7204 		expd_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7205 		    &sas_topo_change_list->ExpanderDevHandle);
7206 		num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
7207 		    &sas_topo_change_list->NumEntries);
7208 		start_phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
7209 		    &sas_topo_change_list->StartPhyNum);
7210 		expstatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
7211 		    &sas_topo_change_list->ExpStatus);
7212 		physport = ddi_get8(mpt->m_acc_reply_frame_hdl,
7213 		    &sas_topo_change_list->PhysicalPort);
7214 
7215 		string[0] = 0;
7216 		if (expd_handle) {
7217 			flags = MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED;
7218 			switch (expstatus) {
7219 			case MPI2_EVENT_SAS_TOPO_ES_ADDED:
7220 				(void) sprintf(string, " added");
7221 				/*
7222 				 * New expander device added
7223 				 */
7224 				mpt->m_port_chng = 1;
7225 				topo_node = kmem_zalloc(
7226 				    sizeof (mptsas_topo_change_list_t),
7227 				    KM_SLEEP);
7228 				topo_node->mpt = mpt;
7229 				topo_node->event = MPTSAS_DR_EVENT_RECONFIG_SMP;
7230 				topo_node->un.physport = physport;
7231 				topo_node->devhdl = expd_handle;
7232 				topo_node->flags = flags;
7233 				topo_node->object = NULL;
7234 				if (topo_head == NULL) {
7235 					topo_head = topo_tail = topo_node;
7236 				} else {
7237 					topo_tail->next = topo_node;
7238 					topo_tail = topo_node;
7239 				}
7240 				break;
7241 			case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
7242 				(void) sprintf(string, " not responding, "
7243 				    "removed");
7244 				psmp = refhash_linear_search(mpt->m_smp_targets,
7245 				    mptsas_smp_eval_devhdl, &expd_handle);
7246 				if (psmp == NULL)
7247 					break;
7248 
7249 				topo_node = kmem_zalloc(
7250 				    sizeof (mptsas_topo_change_list_t),
7251 				    KM_SLEEP);
7252 				topo_node->mpt = mpt;
7253 				topo_node->un.phymask =
7254 				    psmp->m_addr.mta_phymask;
7255 				topo_node->event = MPTSAS_DR_EVENT_OFFLINE_SMP;
7256 				topo_node->devhdl = expd_handle;
7257 				topo_node->flags = flags;
7258 				topo_node->object = NULL;
7259 				if (topo_head == NULL) {
7260 					topo_head = topo_tail = topo_node;
7261 				} else {
7262 					topo_tail->next = topo_node;
7263 					topo_tail = topo_node;
7264 				}
7265 				break;
7266 			case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
7267 				break;
7268 			case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
7269 				(void) sprintf(string, " not responding, "
7270 				    "delaying removal");
7271 				break;
7272 			default:
7273 				break;
7274 			}
7275 		} else {
7276 			flags = MPTSAS_TOPO_FLAG_DIRECT_ATTACHED_DEVICE;
7277 		}
7278 
7279 		NDBG20(("SAS TOPOLOGY CHANGE for enclosure %x expander %x%s\n",
7280 		    enc_handle, expd_handle, string));
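		/*
		 * Walk each PHY entry in the change list, queueing DR events
		 * for added or removed devices and logging link state
		 * changes.
		 */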
7281 		for (i = 0; i < num_entries; i++) {
7282 			phy = i + start_phy_num;
7283 			phystatus = ddi_get8(mpt->m_acc_reply_frame_hdl,
7284 			    &sas_topo_change_list->PHY[i].PhyStatus);
7285 			dev_handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7286 			    &sas_topo_change_list->PHY[i].AttachedDevHandle);
7287 			reason_code = phystatus & MPI2_EVENT_SAS_TOPO_RC_MASK;
7288 			/*
7289 			 * Filter out processing of Phy Vacant Status unless
7290 			 * the reason code is "Not Responding".  Process all
7291 			 * other combinations of Phy Status and Reason Codes.
7292 			 */
7293 			if ((phystatus &
7294 			    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) &&
7295 			    (reason_code !=
7296 			    MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) {
7297 				continue;
7298 			}
7299 			curr[0] = 0;
7300 			prev[0] = 0;
7301 			string[0] = 0;
7302 			switch (reason_code) {
7303 			case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
7304 			{
7305 				NDBG20(("mptsas%d phy %d physical_port %d "
7306 				    "dev_handle %d added", mpt->m_instance, phy,
7307 				    physport, dev_handle));
7308 				link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
7309 				    &sas_topo_change_list->PHY[i].LinkRate);
7310 				state = (link_rate &
7311 				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
7312 				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
7313 				switch (state) {
7314 				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7315 					(void) sprintf(curr, "is disabled");
7316 					break;
7317 				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7318 					(void) sprintf(curr, "is offline, "
7319 					    "failed speed negotiation");
7320 					break;
7321 				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7322 					(void) sprintf(curr, "SATA OOB "
7323 					    "complete");
7324 					break;
7325 				case SMP_RESET_IN_PROGRESS:
7326 					(void) sprintf(curr, "SMP reset in "
7327 					    "progress");
7328 					break;
7329 				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7330 					(void) sprintf(curr, "is online at "
7331 					    "1.5 Gbps");
7332 					break;
7333 				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7334 					(void) sprintf(curr, "is online at 3.0 "
7335 					    "Gbps");
7336 					break;
7337 				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7338 					(void) sprintf(curr, "is online at 6.0 "
7339 					    "Gbps");
7340 					break;
7341 				case MPI25_EVENT_SAS_TOPO_LR_RATE_12_0:
7342 					(void) sprintf(curr,
7343 					    "is online at 12.0 Gbps");
7344 					break;
7345 				default:
7346 					(void) sprintf(curr, "state is "
7347 					    "unknown");
7348 					break;
7349 				}
7350 				/*
7351 				 * New target device added into the system.
7352 				 * Set association flag according to if an
7353 				 * expander is used or not.
7354 				 */
7355 				exp_flag =
7356 				    MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
7357 				if (flags ==
7358 				    MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
7359 					flags = exp_flag;
7360 				}
7361 				topo_node = kmem_zalloc(
7362 				    sizeof (mptsas_topo_change_list_t),
7363 				    KM_SLEEP);
7364 				topo_node->mpt = mpt;
7365 				topo_node->event =
7366 				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
7367 				if (expd_handle == 0) {
7368 					/*
7369 					 * Per MPI 2, an expander dev handle
7370 					 * of 0 means a directly attached
7371 					 * device, so the PHY determines
7372 					 * which iport it is associated with.
7373 					 */
7374 					physport = phy;
7375 					mpt->m_port_chng = 1;
7376 				}
7377 				topo_node->un.physport = physport;
7378 				topo_node->devhdl = dev_handle;
7379 				topo_node->flags = flags;
7380 				topo_node->object = NULL;
7381 				if (topo_head == NULL) {
7382 					topo_head = topo_tail = topo_node;
7383 				} else {
7384 					topo_tail->next = topo_node;
7385 					topo_tail = topo_node;
7386 				}
7387 				break;
7388 			}
7389 			case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
7390 			{
7391 				NDBG20(("mptsas%d phy %d physical_port %d "
7392 				    "dev_handle %d removed", mpt->m_instance,
7393 				    phy, physport, dev_handle));
7394 				/*
7395 				 * Set association flag according to if an
7396 				 * expander is used or not.
7397 				 */
7398 				exp_flag =
7399 				    MPTSAS_TOPO_FLAG_EXPANDER_ATTACHED_DEVICE;
7400 				if (flags ==
7401 				    MPTSAS_TOPO_FLAG_EXPANDER_ASSOCIATED) {
7402 					flags = exp_flag;
7403 				}
7404 				/*
7405 				 * The target device has been removed from
7406 				 * the system before the device is actually
7407 				 * taken offline.
7408 				 */
7409 				ptgt = refhash_linear_search(mpt->m_targets,
7410 				    mptsas_target_eval_devhdl, &dev_handle);
7411 				/*
7412 				 * If ptgt is NULL here, it means that the
7413 				 * DevHandle is not in the hash table.  This is
7414 				 * reasonable sometimes.  For example, if a
7415 				 * disk was pulled, then added, then pulled
7416 				 * again, the disk will not have been put into
7417 				 * the hash table because the add event will
7418 				 * have an invalid phymask.  BUT, this does not
7419 				 * mean that the DevHandle is invalid.  The
7420 				 * controller will still have a valid DevHandle
7421 				 * that must be removed.  To do this, use the
7422 				 * MPTSAS_TOPO_FLAG_REMOVE_HANDLE event.
7423 				 */
7424 				if (ptgt == NULL) {
7425 					topo_node = kmem_zalloc(
7426 					    sizeof (mptsas_topo_change_list_t),
7427 					    KM_SLEEP);
7428 					topo_node->mpt = mpt;
7429 					topo_node->un.phymask = 0;
7430 					topo_node->event =
7431 					    MPTSAS_TOPO_FLAG_REMOVE_HANDLE;
7432 					topo_node->devhdl = dev_handle;
7433 					topo_node->flags = flags;
7434 					topo_node->object = NULL;
7435 					if (topo_head == NULL) {
7436 						topo_head = topo_tail =
7437 						    topo_node;
7438 					} else {
7439 						topo_tail->next = topo_node;
7440 						topo_tail = topo_node;
7441 					}
7442 					break;
7443 				}
7444 
7445 				/*
7446 				 * Update the DR flag immediately to avoid
7447 				 * I/O failures before failover finishes.
7448 				 * Note the locking: m_tx_waitq_mutex must be
7449 				 * held while setting m_dr_flag because
7450 				 * subsequent commands are not added to the
7451 				 * waitq; instead, tran_start must return
7452 				 * TRAN_BUSY.
7453 				 */
7454 				mutex_enter(&mpt->m_tx_waitq_mutex);
7455 				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7456 				mutex_exit(&mpt->m_tx_waitq_mutex);
7457 
7458 				topo_node = kmem_zalloc(
7459 				    sizeof (mptsas_topo_change_list_t),
7460 				    KM_SLEEP);
7461 				topo_node->mpt = mpt;
7462 				topo_node->un.phymask =
7463 				    ptgt->m_addr.mta_phymask;
7464 				topo_node->event =
7465 				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
7466 				topo_node->devhdl = dev_handle;
7467 				topo_node->flags = flags;
7468 				topo_node->object = NULL;
7469 				if (topo_head == NULL) {
7470 					topo_head = topo_tail = topo_node;
7471 				} else {
7472 					topo_tail->next = topo_node;
7473 					topo_tail = topo_node;
7474 				}
7475 				break;
7476 			}
7477 			case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
7478 				link_rate = ddi_get8(mpt->m_acc_reply_frame_hdl,
7479 				    &sas_topo_change_list->PHY[i].LinkRate);
7480 				state = (link_rate &
7481 				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
7482 				    MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
7483 				pSmhba = &mpt->m_phy_info[i].smhba_info;
7484 				pSmhba->negotiated_link_rate = state;
7485 				switch (state) {
7486 				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7487 					(void) sprintf(curr, "is disabled");
7488 					mptsas_smhba_log_sysevent(mpt,
7489 					    ESC_SAS_PHY_EVENT,
7490 					    SAS_PHY_REMOVE,
7491 					    &mpt->m_phy_info[i].smhba_info);
7492 					mpt->m_phy_info[i].smhba_info.
7493 					    negotiated_link_rate
7494 					    = 0x1;
7495 					break;
7496 				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7497 					(void) sprintf(curr, "is offline, "
7498 					    "failed speed negotiation");
7499 					mptsas_smhba_log_sysevent(mpt,
7500 					    ESC_SAS_PHY_EVENT,
7501 					    SAS_PHY_OFFLINE,
7502 					    &mpt->m_phy_info[i].smhba_info);
7503 					break;
7504 				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7505 					(void) sprintf(curr, "SATA OOB "
7506 					    "complete");
7507 					break;
7508 				case SMP_RESET_IN_PROGRESS:
7509 					(void) sprintf(curr, "SMP reset in "
7510 					    "progress");
7511 					break;
7512 				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7513 					(void) sprintf(curr, "is online at "
7514 					    "1.5 Gbps");
7515 					if ((expd_handle == 0) &&
7516 					    (enc_handle == 1)) {
7517 						mpt->m_port_chng = 1;
7518 					}
7519 					mptsas_smhba_log_sysevent(mpt,
7520 					    ESC_SAS_PHY_EVENT,
7521 					    SAS_PHY_ONLINE,
7522 					    &mpt->m_phy_info[i].smhba_info);
7523 					break;
7524 				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7525 					(void) sprintf(curr, "is online at 3.0 "
7526 					    "Gbps");
7527 					if ((expd_handle == 0) &&
7528 					    (enc_handle == 1)) {
7529 						mpt->m_port_chng = 1;
7530 					}
7531 					mptsas_smhba_log_sysevent(mpt,
7532 					    ESC_SAS_PHY_EVENT,
7533 					    SAS_PHY_ONLINE,
7534 					    &mpt->m_phy_info[i].smhba_info);
7535 					break;
7536 				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7537 					(void) sprintf(curr, "is online at "
7538 					    "6.0 Gbps");
7539 					if ((expd_handle == 0) &&
7540 					    (enc_handle == 1)) {
7541 						mpt->m_port_chng = 1;
7542 					}
7543 					mptsas_smhba_log_sysevent(mpt,
7544 					    ESC_SAS_PHY_EVENT,
7545 					    SAS_PHY_ONLINE,
7546 					    &mpt->m_phy_info[i].smhba_info);
7547 					break;
7548 				case MPI25_EVENT_SAS_TOPO_LR_RATE_12_0:
7549 					(void) sprintf(curr, "is online at "
7550 					    "12.0 Gbps");
7551 					if ((expd_handle == 0) &&
7552 					    (enc_handle == 1)) {
7553 						mpt->m_port_chng = 1;
7554 					}
7555 					mptsas_smhba_log_sysevent(mpt,
7556 					    ESC_SAS_PHY_EVENT,
7557 					    SAS_PHY_ONLINE,
7558 					    &mpt->m_phy_info[i].smhba_info);
7559 					break;
7560 				default:
7561 					(void) sprintf(curr, "state is "
7562 					    "unknown");
7563 					break;
7564 				}
7565 
7566 				state = (link_rate &
7567 				    MPI2_EVENT_SAS_TOPO_LR_PREV_MASK) >>
7568 				    MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT;
7569 				switch (state) {
7570 				case MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED:
7571 					(void) sprintf(prev, ", was disabled");
7572 					break;
7573 				case MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED:
7574 					(void) sprintf(prev, ", was offline, "
7575 					    "failed speed negotiation");
7576 					break;
7577 				case MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE:
7578 					(void) sprintf(prev, ", was SATA OOB "
7579 					    "complete");
7580 					break;
7581 				case SMP_RESET_IN_PROGRESS:
7582 					(void) sprintf(prev, ", was SMP reset "
7583 					    "in progress");
7584 					break;
7585 				case MPI2_EVENT_SAS_TOPO_LR_RATE_1_5:
7586 					(void) sprintf(prev, ", was online at "
7587 					    "1.5 Gbps");
7588 					break;
7589 				case MPI2_EVENT_SAS_TOPO_LR_RATE_3_0:
7590 					(void) sprintf(prev, ", was online at "
7591 					    "3.0 Gbps");
7592 					break;
7593 				case MPI2_EVENT_SAS_TOPO_LR_RATE_6_0:
7594 					(void) sprintf(prev, ", was online at "
7595 					    "6.0 Gbps");
7596 					break;
7597 				case MPI25_EVENT_SAS_TOPO_LR_RATE_12_0:
7598 					(void) sprintf(prev, ", was online at "
7599 					    "12.0 Gbps");
7600 					break;
7601 				default:
7602 					break;
7603 				}
7604 				(void) sprintf(&string[strlen(string)], "link "
7605 				    "changed, ");
7606 				break;
7607 			case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
7608 				continue;
7609 			case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
7610 				(void) sprintf(&string[strlen(string)],
7611 				    "target not responding, delaying "
7612 				    "removal");
7613 				break;
7614 			}
7615 			NDBG20(("mptsas%d phy %d DevHandle %x, %s%s%s\n",
7616 			    mpt->m_instance, phy, dev_handle, string, curr,
7617 			    prev));
7618 		}
7619 		if (topo_head != NULL) {
7620 			/*
7621 			 * Launch DR taskq to handle topology change
7622 			 */
7623 			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7624 			    mptsas_handle_dr, (void *)topo_head,
7625 			    DDI_NOSLEEP)) != DDI_SUCCESS) {
7626 				while (topo_head != NULL) {
7627 					topo_node = topo_head;
7628 					topo_head = topo_head->next;
7629 					kmem_free(topo_node,
7630 					    sizeof (mptsas_topo_change_list_t));
7631 				}
7632 				mptsas_log(mpt, CE_NOTE, "mptsas failed to "
7633 				    "start taskq to handle SAS DR event\n");
7634 			}
7635 		}
7636 		break;
7637 	}
7638 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
7639 	{
7640 		Mpi2EventDataIrConfigChangeList_t	*irChangeList;
7641 		mptsas_topo_change_list_t		*topo_head = NULL;
7642 		mptsas_topo_change_list_t		*topo_tail = NULL;
7643 		mptsas_topo_change_list_t		*topo_node = NULL;
7644 		mptsas_target_t				*ptgt;
7645 		uint8_t					num_entries, i, reason;
7646 		uint16_t				volhandle, diskhandle;
7647 
7648 		irChangeList = (pMpi2EventDataIrConfigChangeList_t)
7649 		    eventreply->EventData;
7650 		num_entries = ddi_get8(mpt->m_acc_reply_frame_hdl,
7651 		    &irChangeList->NumElements);
7652 
7653 		NDBG20(("mptsas%d IR_CONFIGURATION_CHANGE_LIST event received",
7654 		    mpt->m_instance));
7655 
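		/*
		 * Walk each element of the IR configuration change list and
		 * queue reconfig or offline DR events for the affected
		 * volumes and physical disks.
		 */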
7656 		for (i = 0; i < num_entries; i++) {
7657 			reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
7658 			    &irChangeList->ConfigElement[i].ReasonCode);
7659 			volhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7660 			    &irChangeList->ConfigElement[i].VolDevHandle);
7661 			diskhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
7662 			    &irChangeList->ConfigElement[i].PhysDiskDevHandle);
7663 
7664 			switch (reason) {
7665 			case MPI2_EVENT_IR_CHANGE_RC_ADDED:
7666 			case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
7667 			{
7668 				NDBG20(("mptsas %d volume added\n",
7669 				    mpt->m_instance));
7670 
7671 				topo_node = kmem_zalloc(
7672 				    sizeof (mptsas_topo_change_list_t),
7673 				    KM_SLEEP);
7674 
7675 				topo_node->mpt = mpt;
7676 				topo_node->event =
7677 				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
7678 				topo_node->un.physport = 0xff;
7679 				topo_node->devhdl = volhandle;
7680 				topo_node->flags =
7681 				    MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
7682 				topo_node->object = NULL;
7683 				if (topo_head == NULL) {
7684 					topo_head = topo_tail = topo_node;
7685 				} else {
7686 					topo_tail->next = topo_node;
7687 					topo_tail = topo_node;
7688 				}
7689 				break;
7690 			}
7691 			case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
7692 			case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
7693 			{
7694 				NDBG20(("mptsas %d volume deleted\n",
7695 				    mpt->m_instance));
7696 				ptgt = refhash_linear_search(mpt->m_targets,
7697 				    mptsas_target_eval_devhdl, &volhandle);
7698 				if (ptgt == NULL)
7699 					break;
7700 
7701 				/*
7702 				 * Clear any flags related to volume
7703 				 */
7704 				(void) mptsas_delete_volume(mpt, volhandle);
7705 
7706 				/*
7707 				 * Update DR flag now to avoid I/O failures.
7708 				 */
7709 				mutex_enter(&mpt->m_tx_waitq_mutex);
7710 				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7711 				mutex_exit(&mpt->m_tx_waitq_mutex);
7712 
7713 				topo_node = kmem_zalloc(
7714 				    sizeof (mptsas_topo_change_list_t),
7715 				    KM_SLEEP);
7716 				topo_node->mpt = mpt;
7717 				topo_node->un.phymask =
7718 				    ptgt->m_addr.mta_phymask;
7719 				topo_node->event =
7720 				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
7721 				topo_node->devhdl = volhandle;
7722 				topo_node->flags =
7723 				    MPTSAS_TOPO_FLAG_RAID_ASSOCIATED;
7724 				topo_node->object = (void *)ptgt;
7725 				if (topo_head == NULL) {
7726 					topo_head = topo_tail = topo_node;
7727 				} else {
7728 					topo_tail->next = topo_node;
7729 					topo_tail = topo_node;
7730 				}
7731 				break;
7732 			}
7733 			case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
7734 			case MPI2_EVENT_IR_CHANGE_RC_HIDE:
7735 			{
7736 				ptgt = refhash_linear_search(mpt->m_targets,
7737 				    mptsas_target_eval_devhdl, &diskhandle);
7738 				if (ptgt == NULL)
7739 					break;
7740 
7741 				/*
7742 				 * Update DR flag now to avoid I/O failures.
7743 				 */
7744 				mutex_enter(&mpt->m_tx_waitq_mutex);
7745 				ptgt->m_dr_flag = MPTSAS_DR_INTRANSITION;
7746 				mutex_exit(&mpt->m_tx_waitq_mutex);
7747 
7748 				topo_node = kmem_zalloc(
7749 				    sizeof (mptsas_topo_change_list_t),
7750 				    KM_SLEEP);
7751 				topo_node->mpt = mpt;
7752 				topo_node->un.phymask =
7753 				    ptgt->m_addr.mta_phymask;
7754 				topo_node->event =
7755 				    MPTSAS_DR_EVENT_OFFLINE_TARGET;
7756 				topo_node->devhdl = diskhandle;
7757 				topo_node->flags =
7758 				    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
7759 				topo_node->object = (void *)ptgt;
7760 				if (topo_head == NULL) {
7761 					topo_head = topo_tail = topo_node;
7762 				} else {
7763 					topo_tail->next = topo_node;
7764 					topo_tail = topo_node;
7765 				}
7766 				break;
7767 			}
7768 			case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
7769 			case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
7770 			{
7771 				/*
7772 				 * The physical drive has been released by an
7773 				 * IR volume, but the physport and phynum are
7774 				 * not in the event data; they can only be
7775 				 * obtained from a SAS Device Page0 request
7776 				 * for the devhdl.
7777 				 */
7778 				topo_node = kmem_zalloc(
7779 				    sizeof (mptsas_topo_change_list_t),
7780 				    KM_SLEEP);
7781 				topo_node->mpt = mpt;
7782 				topo_node->un.phymask = 0;
7783 				topo_node->event =
7784 				    MPTSAS_DR_EVENT_RECONFIG_TARGET;
7785 				topo_node->devhdl = diskhandle;
7786 				topo_node->flags =
7787 				    MPTSAS_TOPO_FLAG_RAID_PHYSDRV_ASSOCIATED;
7788 				topo_node->object = NULL;
7789 				mpt->m_port_chng = 1;
7790 				if (topo_head == NULL) {
7791 					topo_head = topo_tail = topo_node;
7792 				} else {
7793 					topo_tail->next = topo_node;
7794 					topo_tail = topo_node;
7795 				}
7796 				break;
7797 			}
7798 			default:
7799 				break;
7800 			}
7801 		}
7802 
7803 		if (topo_head != NULL) {
7804 			/*
7805 			 * Launch DR taskq to handle topology change
7806 			 */
7807 			if ((ddi_taskq_dispatch(mpt->m_dr_taskq,
7808 			    mptsas_handle_dr, (void *)topo_head,
7809 			    DDI_NOSLEEP)) != DDI_SUCCESS) {
7810 				while (topo_head != NULL) {
7811 					topo_node = topo_head;
7812 					topo_head = topo_head->next;
7813 					kmem_free(topo_node,
7814 					    sizeof (mptsas_topo_change_list_t));
7815 				}
7816 				mptsas_log(mpt, CE_NOTE, "mptsas failed to "
7817 				    "start taskq to handle SAS DR event\n");
7818 			}
7819 		}
7820 		break;
7821 	}
7822 	default:
7823 		return (DDI_FAILURE);
7824 	}
7825 
7826 	return (DDI_SUCCESS);
7827 }
7828 
7829 /*
7830  * handle events from ioc
7831  */
7832 static void
7833 mptsas_handle_event(void *args)
7834 {
7835 	m_replyh_arg_t			*replyh_arg;
7836 	pMpi2EventNotificationReply_t	eventreply;
7837 	uint32_t			event, iocloginfo, rfm;
7838 	uint32_t			status;
7839 	uint8_t				port;
7840 	mptsas_t			*mpt;
7841 	uint_t				iocstatus;
7842 
7843 	replyh_arg = (m_replyh_arg_t *)args;
7844 	rfm = replyh_arg->rfm;
7845 	mpt = replyh_arg->mpt;
7846 
7847 	mutex_enter(&mpt->m_mutex);
7848 	/*
7849 	 * If HBA is being reset, drop incoming event.
7850 	 */
7851 	if (mpt->m_in_reset) {
7852 		NDBG20(("dropping event received prior to reset"));
7853 		mutex_exit(&mpt->m_mutex);
7854 		return;
7855 	}
7856 
7857 	eventreply = (pMpi2EventNotificationReply_t)
7858 	    (mpt->m_reply_frame + (rfm -
7859 	    (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
7860 	event = ddi_get16(mpt->m_acc_reply_frame_hdl, &eventreply->Event);
7861 
7862 	if ((iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
7863 	    &eventreply->IOCStatus)) != 0) {
7864 		if (iocstatus == MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
7865 			mptsas_log(mpt, CE_WARN,
7866 			    "!mptsas_handle_event: IOCStatus=0x%x, "
7867 			    "IOCLogInfo=0x%x", iocstatus,
7868 			    ddi_get32(mpt->m_acc_reply_frame_hdl,
7869 			    &eventreply->IOCLogInfo));
7870 		} else {
7871 			mptsas_log(mpt, CE_WARN,
7872 			    "mptsas_handle_event: IOCStatus=0x%x, "
7873 			    "IOCLogInfo=0x%x", iocstatus,
7874 			    ddi_get32(mpt->m_acc_reply_frame_hdl,
7875 			    &eventreply->IOCLogInfo));
7876 		}
7877 	}
7878 
7879 	/*
7880 	 * figure out what kind of event we got and handle accordingly
7881 	 */
7882 	switch (event) {
7883 	case MPI2_EVENT_LOG_ENTRY_ADDED:
7884 		break;
7885 	case MPI2_EVENT_LOG_DATA:
7886 		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
7887 		    &eventreply->IOCLogInfo);
7888 		NDBG20(("mptsas %d log info %x received.\n", mpt->m_instance,
7889 		    iocloginfo));
7890 		break;
7891 	case MPI2_EVENT_STATE_CHANGE:
7892 		NDBG20(("mptsas%d state change.", mpt->m_instance));
7893 		break;
7894 	case MPI2_EVENT_HARD_RESET_RECEIVED:
7895 		NDBG20(("mptsas%d event change.", mpt->m_instance));
7896 		break;
7897 	case MPI2_EVENT_SAS_DISCOVERY:
7898 	{
7899 		MPI2_EVENT_DATA_SAS_DISCOVERY	*sasdiscovery;
7900 		char				string[80];
7901 		uint8_t				rc;
7902 
7903 		sasdiscovery =
7904 		    (pMpi2EventDataSasDiscovery_t)eventreply->EventData;
7905 
7906 		rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7907 		    &sasdiscovery->ReasonCode);
7908 		port = ddi_get8(mpt->m_acc_reply_frame_hdl,
7909 		    &sasdiscovery->PhysicalPort);
7910 		status = ddi_get32(mpt->m_acc_reply_frame_hdl,
7911 		    &sasdiscovery->DiscoveryStatus);
7912 
7913 		string[0] = 0;
7914 		switch (rc) {
7915 		case MPI2_EVENT_SAS_DISC_RC_STARTED:
7916 			(void) sprintf(string, "STARTING");
7917 			break;
7918 		case MPI2_EVENT_SAS_DISC_RC_COMPLETED:
7919 			(void) sprintf(string, "COMPLETED");
7920 			break;
7921 		default:
7922 			(void) sprintf(string, "UNKNOWN");
7923 			break;
7924 		}
7925 
7926 		NDBG20(("SAS DISCOVERY is %s for port %d, status %x", string,
7927 		    port, status));
7928 
7929 		break;
7930 	}
7931 	case MPI2_EVENT_EVENT_CHANGE:
7932 		NDBG20(("mptsas%d event change.", mpt->m_instance));
7933 		break;
7934 	case MPI2_EVENT_TASK_SET_FULL:
7935 	{
7936 		pMpi2EventDataTaskSetFull_t	taskfull;
7937 
7938 		taskfull = (pMpi2EventDataTaskSetFull_t)eventreply->EventData;
7939 
7940 		NDBG20(("TASK_SET_FULL received for mptsas%d, depth %d\n",
7941 		    mpt->m_instance,  ddi_get16(mpt->m_acc_reply_frame_hdl,
7942 		    &taskfull->CurrentDepth)));
7943 		break;
7944 	}
7945 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
7946 	{
7947 		/*
7948 		 * SAS TOPOLOGY CHANGE LIST Event has already been handled
7949 		 * in mptsas_handle_event_sync() of interrupt context
7950 		 */
7951 		break;
7952 	}
7953 	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
7954 	{
7955 		pMpi2EventDataSasEnclDevStatusChange_t	encstatus;
7956 		uint8_t					rc;
7957 		uint16_t				enchdl;
7958 		char					string[80];
7959 		mptsas_enclosure_t			*mep;
7960 
7961 		encstatus = (pMpi2EventDataSasEnclDevStatusChange_t)
7962 		    eventreply->EventData;
7963 
7964 		rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
7965 		    &encstatus->ReasonCode);
7966 		enchdl = ddi_get16(mpt->m_acc_reply_frame_hdl,
7967 		    &encstatus->EnclosureHandle);
7968 
7969 		switch (rc) {
7970 		case MPI2_EVENT_SAS_ENCL_RC_ADDED:
7971 			(void) sprintf(string, "added");
7972 			break;
7973 		case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
7974 			mep = mptsas_enc_lookup(mpt, enchdl);
7975 			if (mep != NULL) {
7976 				list_remove(&mpt->m_enclosures, mep);
7977 				mptsas_enc_free(mep);
7978 				mep = NULL;
7979 			}
7980 			(void) sprintf(string, ", not responding");
7981 			break;
7982 		default:
7983 			break;
7984 		}
7985 		NDBG20(("mptsas%d ENCLOSURE STATUS CHANGE for enclosure "
7986 		    "%x%s\n", mpt->m_instance,
7987 		    ddi_get16(mpt->m_acc_reply_frame_hdl,
7988 		    &encstatus->EnclosureHandle), string));
7989 
7990 		/*
7991 		 * No matter what has happened, update all of our device state
7992 		 * for enclosures, by retriggering an evaluation.
7993 		 */
7994 		mpt->m_done_traverse_enc = 0;
7995 		mptsas_update_hashtab(mpt);
7996 		break;
7997 	}
7998 
7999 	/*
8000 	 * MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE is handled by
8001 	 * mptsas_handle_event_sync; here we just send the ack message.
8002 	 */
8003 	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
8004 	{
8005 		pMpi2EventDataSasDeviceStatusChange_t	statuschange;
8006 		uint8_t					rc;
8007 		uint16_t				devhdl;
8008 		uint64_t				wwn = 0;
8009 		uint32_t				wwn_lo, wwn_hi;
8010 
8011 		statuschange = (pMpi2EventDataSasDeviceStatusChange_t)
8012 		    eventreply->EventData;
8013 		rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
8014 		    &statuschange->ReasonCode);
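		/*
		 * The SAS address is read as two 32-bit words and then
		 * reassembled into the 64-bit WWN.
		 */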
8015 		wwn_lo = ddi_get32(mpt->m_acc_reply_frame_hdl,
8016 		    (uint32_t *)(void *)&statuschange->SASAddress);
8017 		wwn_hi = ddi_get32(mpt->m_acc_reply_frame_hdl,
8018 		    (uint32_t *)(void *)&statuschange->SASAddress + 1);
8019 		wwn = ((uint64_t)wwn_hi << 32) | wwn_lo;
8020 		devhdl =  ddi_get16(mpt->m_acc_reply_frame_hdl,
8021 		    &statuschange->DevHandle);
8022 
8023 		NDBG13(("MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE wwn is %"PRIx64,
8024 		    wwn));
8025 
8026 		switch (rc) {
8027 		case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
8028 			NDBG20(("SMART data received, ASC/ASCQ = %02x/%02x",
8029 			    ddi_get8(mpt->m_acc_reply_frame_hdl,
8030 			    &statuschange->ASC),
8031 			    ddi_get8(mpt->m_acc_reply_frame_hdl,
8032 			    &statuschange->ASCQ)));
8033 			break;
8034 
8035 		case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
8036 			NDBG20(("Device not supported"));
8037 			break;
8038 
8039 		case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
8040 			NDBG20(("IOC internally generated the Target Reset "
8041 			    "for devhdl:%x", devhdl));
8042 			break;
8043 
8044 		case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
8045 			NDBG20(("IOC's internally generated Target Reset "
8046 			    "completed for devhdl:%x", devhdl));
8047 			break;
8048 
8049 		case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
8050 			NDBG20(("IOC internally generated Abort Task"));
8051 			break;
8052 
8053 		case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
8054 			NDBG20(("IOC's internally generated Abort Task "
8055 			    "completed"));
8056 			break;
8057 
8058 		case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
8059 			NDBG20(("IOC internally generated Abort Task Set"));
8060 			break;
8061 
8062 		case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
8063 			NDBG20(("IOC internally generated Clear Task Set"));
8064 			break;
8065 
8066 		case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
8067 			NDBG20(("IOC internally generated Query Task"));
8068 			break;
8069 
8070 		case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
8071 			NDBG20(("Device sent an Asynchronous Notification"));
8072 			break;
8073 
8074 		default:
8075 			break;
8076 		}
8077 		break;
8078 	}
8079 	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
8080 	{
8081 		/*
8082 		 * IR TOPOLOGY CHANGE LIST Event has already been handled
8083 		 * in mpt_handle_event_sync() of interrupt context
8084 		 */
8085 		break;
8086 	}
8087 	case MPI2_EVENT_IR_OPERATION_STATUS:
8088 	{
8089 		Mpi2EventDataIrOperationStatus_t	*irOpStatus;
8090 		char					reason_str[80];
8091 		uint8_t					rc, percent;
8092 		uint16_t				handle;
8093 
8094 		irOpStatus = (pMpi2EventDataIrOperationStatus_t)
8095 		    eventreply->EventData;
8096 		rc = ddi_get8(mpt->m_acc_reply_frame_hdl,
8097 		    &irOpStatus->RAIDOperation);
8098 		percent = ddi_get8(mpt->m_acc_reply_frame_hdl,
8099 		    &irOpStatus->PercentComplete);
8100 		handle = ddi_get16(mpt->m_acc_reply_frame_hdl,
8101 		    &irOpStatus->VolDevHandle);
8102 
8103 		switch (rc) {
8104 			case MPI2_EVENT_IR_RAIDOP_RESYNC:
8105 				(void) sprintf(reason_str, "resync");
8106 				break;
8107 			case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
8108 				(void) sprintf(reason_str, "online capacity "
8109 				    "expansion");
8110 				break;
8111 			case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
8112 				(void) sprintf(reason_str, "consistency check");
8113 				break;
8114 			default:
8115 				(void) sprintf(reason_str, "unknown reason %x",
8116 				    rc);
8117 		}
8118 
8119 		NDBG20(("mptsas%d raid operational status: (%s)"
8120 		    "\thandle(0x%04x), percent complete(%d)\n",
8121 		    mpt->m_instance, reason_str, handle, percent));
8122 		break;
8123 	}
8124 	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
8125 	{
8126 		pMpi2EventDataSasBroadcastPrimitive_t	sas_broadcast;
8127 		uint8_t					phy_num;
8128 		uint8_t					primitive;
8129 
8130 		sas_broadcast = (pMpi2EventDataSasBroadcastPrimitive_t)
8131 		    eventreply->EventData;
8132 
8133 		phy_num = ddi_get8(mpt->m_acc_reply_frame_hdl,
8134 		    &sas_broadcast->PhyNum);
8135 		primitive = ddi_get8(mpt->m_acc_reply_frame_hdl,
8136 		    &sas_broadcast->Primitive);
8137 
8138 		switch (primitive) {
8139 		case MPI2_EVENT_PRIMITIVE_CHANGE:
8140 			mptsas_smhba_log_sysevent(mpt,
8141 			    ESC_SAS_HBA_PORT_BROADCAST,
8142 			    SAS_PORT_BROADCAST_CHANGE,
8143 			    &mpt->m_phy_info[phy_num].smhba_info);
8144 			break;
8145 		case MPI2_EVENT_PRIMITIVE_SES:
8146 			mptsas_smhba_log_sysevent(mpt,
8147 			    ESC_SAS_HBA_PORT_BROADCAST,
8148 			    SAS_PORT_BROADCAST_SES,
8149 			    &mpt->m_phy_info[phy_num].smhba_info);
8150 			break;
8151 		case MPI2_EVENT_PRIMITIVE_EXPANDER:
8152 			mptsas_smhba_log_sysevent(mpt,
8153 			    ESC_SAS_HBA_PORT_BROADCAST,
8154 			    SAS_PORT_BROADCAST_D01_4,
8155 			    &mpt->m_phy_info[phy_num].smhba_info);
8156 			break;
8157 		case MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT:
8158 			mptsas_smhba_log_sysevent(mpt,
8159 			    ESC_SAS_HBA_PORT_BROADCAST,
8160 			    SAS_PORT_BROADCAST_D04_7,
8161 			    &mpt->m_phy_info[phy_num].smhba_info);
8162 			break;
8163 		case MPI2_EVENT_PRIMITIVE_RESERVED3:
8164 			mptsas_smhba_log_sysevent(mpt,
8165 			    ESC_SAS_HBA_PORT_BROADCAST,
8166 			    SAS_PORT_BROADCAST_D16_7,
8167 			    &mpt->m_phy_info[phy_num].smhba_info);
8168 			break;
8169 		case MPI2_EVENT_PRIMITIVE_RESERVED4:
8170 			mptsas_smhba_log_sysevent(mpt,
8171 			    ESC_SAS_HBA_PORT_BROADCAST,
8172 			    SAS_PORT_BROADCAST_D29_7,
8173 			    &mpt->m_phy_info[phy_num].smhba_info);
8174 			break;
8175 		case MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED:
8176 			mptsas_smhba_log_sysevent(mpt,
8177 			    ESC_SAS_HBA_PORT_BROADCAST,
8178 			    SAS_PORT_BROADCAST_D24_0,
8179 			    &mpt->m_phy_info[phy_num].smhba_info);
8180 			break;
8181 		case MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED:
8182 			mptsas_smhba_log_sysevent(mpt,
8183 			    ESC_SAS_HBA_PORT_BROADCAST,
8184 			    SAS_PORT_BROADCAST_D27_4,
8185 			    &mpt->m_phy_info[phy_num].smhba_info);
8186 			break;
8187 		default:
8188 			NDBG16(("mptsas%d: unknown BROADCAST PRIMITIVE"
8189 			    " %x received",
8190 			    mpt->m_instance, primitive));
8191 			break;
8192 		}
8193 		NDBG16(("mptsas%d sas broadcast primitive: "
8194 		    "\tprimitive(0x%04x), phy(%d) complete\n",
8195 		    mpt->m_instance, primitive, phy_num));
8196 		break;
8197 	}
8198 	case MPI2_EVENT_IR_VOLUME:
8199 	{
8200 		Mpi2EventDataIrVolume_t		*irVolume;
8201 		uint16_t			devhandle;
8202 		uint32_t			state;
8203 		int				config, vol;
8204 		uint8_t				found = FALSE;
8205 
8206 		irVolume = (pMpi2EventDataIrVolume_t)eventreply->EventData;
8207 		state = ddi_get32(mpt->m_acc_reply_frame_hdl,
8208 		    &irVolume->NewValue);
8209 		devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
8210 		    &irVolume->VolDevHandle);
8211 
8212 		NDBG20(("EVENT_IR_VOLUME event is received"));
8213 
8214 		/*
8215 		 * Get latest RAID info and then find the DevHandle for this
8216 		 * event in the configuration.  If the DevHandle is not found
8217 		 * just exit the event.
8218 		 */
8219 		(void) mptsas_get_raid_info(mpt);
8220 		for (config = 0; (config < mpt->m_num_raid_configs) &&
8221 		    (!found); config++) {
8222 			for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
8223 				if (mpt->m_raidconfig[config].m_raidvol[vol].
8224 				    m_raidhandle == devhandle) {
8225 					found = TRUE;
8226 					break;
8227 				}
8228 			}
8229 		}
8230 		if (!found) {
8231 			break;
8232 		}
8233 
8234 		switch (irVolume->ReasonCode) {
8235 		case MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED:
8236 		{
8237 			uint32_t i;
8238 			mpt->m_raidconfig[config].m_raidvol[vol].m_settings =
8239 			    state;
8240 
8241 			i = state & MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING;
8242 			mptsas_log(mpt, CE_NOTE, " Volume %d settings changed"
8243 			    ", auto-config of hot-swap drives is %s"
8244 			    ", write caching is %s"
8245 			    ", hot-spare pool mask is %02x\n",
8246 			    vol, state &
8247 			    MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE
8248 			    ? "disabled" : "enabled",
8249 			    i == MPI2_RAIDVOL0_SETTING_UNCHANGED
8250 			    ? "controlled by member disks" :
8251 			    i == MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING
8252 			    ? "disabled" :
8253 			    i == MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING
8254 			    ? "enabled" :
8255 			    "incorrectly set",
8256 			    (state >> 16) & 0xff);
8257 			break;
8258 		}
8259 		case MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED:
8260 		{
8261 			mpt->m_raidconfig[config].m_raidvol[vol].m_state =
8262 			    (uint8_t)state;
8263 
8264 			mptsas_log(mpt, CE_NOTE,
8265 			    "Volume %d is now %s\n", vol,
8266 			    state == MPI2_RAID_VOL_STATE_OPTIMAL
8267 			    ? "optimal" :
8268 			    state == MPI2_RAID_VOL_STATE_DEGRADED
8269 			    ? "degraded" :
8270 			    state == MPI2_RAID_VOL_STATE_ONLINE
8271 			    ? "online" :
8272 			    state == MPI2_RAID_VOL_STATE_INITIALIZING
8273 			    ? "initializing" :
8274 			    state == MPI2_RAID_VOL_STATE_FAILED
8275 			    ? "failed" :
8276 			    state == MPI2_RAID_VOL_STATE_MISSING
8277 			    ? "missing" :
8278 			    "state unknown");
8279 			break;
8280 		}
8281 		case MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED:
8282 		{
8283 			mpt->m_raidconfig[config].m_raidvol[vol].
8284 			    m_statusflags = state;
8285 
8286 			mptsas_log(mpt, CE_NOTE,
8287 			    " Volume %d is now %s%s%s%s%s%s%s%s%s\n",
8288 			    vol,
8289 			    state & MPI2_RAIDVOL0_STATUS_FLAG_ENABLED
8290 			    ? ", enabled" : ", disabled",
8291 			    state & MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED
8292 			    ? ", quiesced" : "",
8293 			    state & MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE
8294 			    ? ", inactive" : ", active",
8295 			    state &
8296 			    MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL
8297 			    ? ", bad block table is full" : "",
8298 			    state &
8299 			    MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS
8300 			    ? ", resync in progress" : "",
8301 			    state & MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT
8302 			    ? ", background initialization in progress" : "",
8303 			    state &
8304 			    MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION
8305 			    ? ", capacity expansion in progress" : "",
8306 			    state &
8307 			    MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK
8308 			    ? ", consistency check in progress" : "",
8309 			    state & MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB
8310 			    ? ", data scrub in progress" : "");
8311 			break;
8312 		}
8313 		default:
8314 			break;
8315 		}
8316 		break;
8317 	}
8318 	case MPI2_EVENT_IR_PHYSICAL_DISK:
8319 	{
8320 		Mpi2EventDataIrPhysicalDisk_t	*irPhysDisk;
8321 		uint16_t			devhandle, enchandle, slot;
8322 		uint32_t			status, state;
8323 		uint8_t				physdisknum, reason;
8324 
8325 		irPhysDisk = (Mpi2EventDataIrPhysicalDisk_t *)
8326 		    eventreply->EventData;
8327 		physdisknum = ddi_get8(mpt->m_acc_reply_frame_hdl,
8328 		    &irPhysDisk->PhysDiskNum);
8329 		devhandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
8330 		    &irPhysDisk->PhysDiskDevHandle);
8331 		enchandle = ddi_get16(mpt->m_acc_reply_frame_hdl,
8332 		    &irPhysDisk->EnclosureHandle);
8333 		slot = ddi_get16(mpt->m_acc_reply_frame_hdl,
8334 		    &irPhysDisk->Slot);
8335 		state = ddi_get32(mpt->m_acc_reply_frame_hdl,
8336 		    &irPhysDisk->NewValue);
8337 		reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
8338 		    &irPhysDisk->ReasonCode);
8339 
8340 		NDBG20(("EVENT_IR_PHYSICAL_DISK event is received"));
8341 
8342 		switch (reason) {
8343 		case MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED:
8344 			mptsas_log(mpt, CE_NOTE,
8345 			    " PhysDiskNum %d with DevHandle 0x%x in slot %d "
8346 			    "for enclosure with handle 0x%x is now in hot "
8347 			    "spare pool %d",
8348 			    physdisknum, devhandle, slot, enchandle,
8349 			    (state >> 16) & 0xff);
8350 			break;
8351 
8352 		case MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED:
8353 			status = state;
8354 			mptsas_log(mpt, CE_NOTE,
8355 			    " PhysDiskNum %d with DevHandle 0x%x in slot %d "
8356 			    "for enclosure with handle 0x%x is now "
8357 			    "%s%s%s%s%s\n", physdisknum, devhandle, slot,
8358 			    enchandle,
8359 			    status & MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME
8360 			    ? ", inactive" : ", active",
8361 			    status & MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC
8362 			    ? ", out of sync" : "",
8363 			    status & MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED
8364 			    ? ", quiesced" : "",
8365 			    status &
8366 			    MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED
8367 			    ? ", write cache enabled" : "",
8368 			    status & MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET
8369 			    ? ", capacity expansion target" : "");
8370 			break;
8371 
8372 		case MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED:
8373 			mptsas_log(mpt, CE_NOTE,
8374 			    " PhysDiskNum %d with DevHandle 0x%x in slot %d "
8375 			    "for enclosure with handle 0x%x is now %s\n",
8376 			    physdisknum, devhandle, slot, enchandle,
8377 			    state == MPI2_RAID_PD_STATE_OPTIMAL
8378 			    ? "optimal" :
8379 			    state == MPI2_RAID_PD_STATE_REBUILDING
8380 			    ? "rebuilding" :
8381 			    state == MPI2_RAID_PD_STATE_DEGRADED
8382 			    ? "degraded" :
8383 			    state == MPI2_RAID_PD_STATE_HOT_SPARE
8384 			    ? "a hot spare" :
8385 			    state == MPI2_RAID_PD_STATE_ONLINE
8386 			    ? "online" :
8387 			    state == MPI2_RAID_PD_STATE_OFFLINE
8388 			    ? "offline" :
8389 			    state == MPI2_RAID_PD_STATE_NOT_COMPATIBLE
8390 			    ? "not compatible" :
8391 			    state == MPI2_RAID_PD_STATE_NOT_CONFIGURED
8392 			    ? "not configured" :
8393 			    "state unknown");
8394 			break;
8395 		}
8396 		break;
8397 	}
8398 	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
8399 	{
8400 		pMpi26EventDataActiveCableExcept_t	actcable;
8401 		uint32_t power;
8402 		uint8_t reason, id;
8403 
8404 		actcable = (pMpi26EventDataActiveCableExcept_t)
8405 		    eventreply->EventData;
8406 		power = ddi_get32(mpt->m_acc_reply_frame_hdl,
8407 		    &actcable->ActiveCablePowerRequirement);
8408 		reason = ddi_get8(mpt->m_acc_reply_frame_hdl,
8409 		    &actcable->ReasonCode);
8410 		id = ddi_get8(mpt->m_acc_reply_frame_hdl,
8411 		    &actcable->ReceptacleID);
8412 
8413 		/*
8414 		 * It'd be nice if this weren't just logging to the system but
8415 		 * were telling FMA about the active cable problem and FMA was
8416 		 * aware of the cable topology and state.
8417 		 */
8418 		switch (reason) {
8419 		case MPI26_EVENT_ACTIVE_CABLE_PRESENT:
8420 			/* Don't log anything if it's fine */
8421 			break;
8422 		case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
8423 			mptsas_log(mpt, CE_WARN, "An active cable (id %u) does "
8424 			    "not have sufficient power to be enabled. "
8425 			    "Devices connected to this cable will not be "
8426 			    "visible to the system.", id);
8427 			if (power == UINT32_MAX) {
8428 				mptsas_log(mpt, CE_CONT, "The cable's power "
8429 				    "requirements are unknown.\n");
8430 			} else {
8431 				mptsas_log(mpt, CE_CONT, "The cable requires "
8432 				    "%u mW of power to function.\n", power);
8433 			}
8434 			break;
8435 		case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
8436 			mptsas_log(mpt, CE_WARN, "An active cable (id %u) is "
8437 			    "degraded and not running at its full speed. "
8438 			    "Some devices might not appear.", id);
8439 			break;
8440 		default:
8441 			break;
8442 		}
8443 		break;
8444 	}
8445 	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
8446 	case MPI2_EVENT_PCIE_ENUMERATION:
8447 	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
8448 	case MPI2_EVENT_PCIE_LINK_COUNTER:
8449 		mptsas_log(mpt, CE_NOTE, "Unhandled mpt_sas PCIe device "
8450 		    "event received (0x%x)", event);
8451 		break;
8452 	default:
8453 		NDBG20(("mptsas%d: unknown event %x received",
8454 		    mpt->m_instance, event));
8455 		break;
8456 	}
8457 
8458 	/*
8459 	 * Return the reply frame to the free queue.
8460 	 */
8461 	ddi_put32(mpt->m_acc_free_queue_hdl,
8462 	    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index], rfm);
8463 	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
8464 	    DDI_DMA_SYNC_FORDEV);
8465 	if (++mpt->m_free_index == mpt->m_free_queue_depth) {
8466 		mpt->m_free_index = 0;
8467 	}
8468 	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
8469 	    mpt->m_free_index);
8470 	mutex_exit(&mpt->m_mutex);
8471 }
8472 
8473 /*
8474  * invoked from timeout() to restart qfull cmds with throttle == 0
8475  */
8476 static void
8477 mptsas_restart_cmd(void *arg)
8478 {
8479 	mptsas_t	*mpt = arg;
8480 	mptsas_target_t	*ptgt = NULL;
8481 
8482 	mutex_enter(&mpt->m_mutex);
8483 
8484 	mpt->m_restart_cmd_timeid = 0;
8485 
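	/*
	 * Restore full throttle on any target that was throttled for a
	 * queue-full condition and is no longer in a reset delay.
	 */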
8486 	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
8487 	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
8488 		if (ptgt->m_reset_delay == 0) {
8489 			if (ptgt->m_t_throttle == QFULL_THROTTLE) {
8490 				mptsas_set_throttle(mpt, ptgt,
8491 				    MAX_THROTTLE);
8492 			}
8493 		}
8494 	}
8495 	mptsas_restart_hba(mpt);
8496 	mutex_exit(&mpt->m_mutex);
8497 }
8498 
8499 void
8500 mptsas_remove_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
8501 {
8502 	int		slot;
8503 	mptsas_slots_t	*slots = mpt->m_active;
8504 	mptsas_target_t	*ptgt = cmd->cmd_tgt_addr;
8505 
8506 	ASSERT(cmd != NULL);
8507 	ASSERT(cmd->cmd_queued == FALSE);
8508 
8509 	/*
8510 	 * Task Management cmds are removed in their own routines.  Also,
8511 	 * we don't want to modify timeout based on TM cmds.
8512 	 */
8513 	if (cmd->cmd_flags & CFLAG_TM_CMD) {
8514 		return;
8515 	}
8516 
8517 	slot = cmd->cmd_slot;
8518 
8519 	/*
8520 	 * remove the cmd.
8521 	 */
8522 	if (cmd == slots->m_slot[slot]) {
8523 		NDBG31(("mptsas_remove_cmd: removing cmd=0x%p, flags "
8524 		    "0x%x", (void *)cmd, cmd->cmd_flags));
8525 		slots->m_slot[slot] = NULL;
8526 		mpt->m_ncmds--;
8527 
8528 		/*
8529 		 * only decrement per target ncmds if command
8530 		 * has a target associated with it.
8531 		 */
8532 		if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
8533 			ptgt->m_t_ncmds--;
8534 			/*
8535 			 * reset throttle if we just ran an untagged command
8536 			 * to a tagged target
8537 			 */
8538 			if ((ptgt->m_t_ncmds == 0) &&
8539 			    ((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0)) {
8540 				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
8541 			}
8542 
8543 			/*
8544 			 * Remove this command from the active queue.
8545 			 */
8546 			if (cmd->cmd_active_expiration != 0) {
8547 				TAILQ_REMOVE(&ptgt->m_active_cmdq, cmd,
8548 				    cmd_active_link);
8549 				cmd->cmd_active_expiration = 0;
8550 			}
8551 		}
8552 	}
8553 
8554 	/*
8555 	 * This is all we need to do for ioc commands.
8556 	 */
8557 	if (cmd->cmd_flags & CFLAG_CMDIOC) {
8558 		mptsas_return_to_pool(mpt, cmd);
8559 		return;
8560 	}
8561 
8562 	ASSERT(cmd != slots->m_slot[cmd->cmd_slot]);
8563 }
8564 
8565 /*
8566  * accept all cmds on the tx_waitq if any and then
8567  * start a fresh request from the top of the device queue.
8568  *
8569  * Since cmds are almost always queued on the tx_waitq and only rarely on
8570  * the instance waitq, this function should not be invoked from the ISR;
8571  * mptsas_restart_waitq() is invoked there instead.  Otherwise the burden
8572  * that belongs to the I/O dispatch CPUs would be moved to the interrupt CPU.
8573  */
8574 static void
8575 mptsas_restart_hba(mptsas_t *mpt)
8576 {
8577 	ASSERT(mutex_owned(&mpt->m_mutex));
8578 
8579 	mutex_enter(&mpt->m_tx_waitq_mutex);
8580 	if (mpt->m_tx_waitq) {
8581 		mptsas_accept_tx_waitq(mpt);
8582 	}
8583 	mutex_exit(&mpt->m_tx_waitq_mutex);
8584 	mptsas_restart_waitq(mpt);
8585 }
8586 
8587 /*
8588  * start a fresh request from the top of the device queue
8589  */
8590 static void
8591 mptsas_restart_waitq(mptsas_t *mpt)
8592 {
8593 	mptsas_cmd_t	*cmd, *next_cmd;
8594 	mptsas_target_t *ptgt = NULL;
8595 
8596 	NDBG1(("mptsas_restart_waitq: mpt=0x%p", (void *)mpt));
8597 
8598 	ASSERT(mutex_owned(&mpt->m_mutex));
8599 
8600 	/*
8601 	 * If there is a reset delay, don't start any cmds.  Otherwise, start
8602 	 * as many cmds as possible.
8603 	 * Since SMID 0 is reserved and the TM slot is reserved, the actual max
8604 	 * commands is m_max_requests - 2.
8605 	 */
8606 	cmd = mpt->m_waitq;
8607 
8608 	while (cmd != NULL) {
8609 		next_cmd = cmd->cmd_linkp;
8610 		if (cmd->cmd_flags & CFLAG_PASSTHRU) {
8611 			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8612 				/*
8613 				 * The passthru command got a slot, so mark
8614 				 * it prepared with CFLAG_PREPARED.
8615 				 */
8616 				cmd->cmd_flags |= CFLAG_PREPARED;
8617 				mptsas_waitq_delete(mpt, cmd);
8618 				mptsas_start_passthru(mpt, cmd);
8619 			}
8620 			cmd = next_cmd;
8621 			continue;
8622 		}
8623 		if (cmd->cmd_flags & CFLAG_CONFIG) {
8624 			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8625 				/*
8626 				 * Send the config page request and delete it
8627 				 * from the waitq.
8628 				 */
8629 				cmd->cmd_flags |= CFLAG_PREPARED;
8630 				mptsas_waitq_delete(mpt, cmd);
8631 				mptsas_start_config_page_access(mpt, cmd);
8632 			}
8633 			cmd = next_cmd;
8634 			continue;
8635 		}
8636 		if (cmd->cmd_flags & CFLAG_FW_DIAG) {
8637 			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8638 				/*
8639 				 * Send the FW Diag request and delete it from
8640 				 * the waitq.
8641 				 */
8642 				cmd->cmd_flags |= CFLAG_PREPARED;
8643 				mptsas_waitq_delete(mpt, cmd);
8644 				mptsas_start_diag(mpt, cmd);
8645 			}
8646 			cmd = next_cmd;
8647 			continue;
8648 		}
8649 
8650 		ptgt = cmd->cmd_tgt_addr;
8651 		if (ptgt && (ptgt->m_t_throttle == DRAIN_THROTTLE) &&
8652 		    (ptgt->m_t_ncmds == 0)) {
8653 			mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
8654 		}
8655 		if ((mpt->m_ncmds <= (mpt->m_max_requests - 2)) &&
8656 		    (ptgt && (ptgt->m_reset_delay == 0)) &&
8657 		    (ptgt && (ptgt->m_t_ncmds <
8658 		    ptgt->m_t_throttle))) {
8659 			if (mptsas_save_cmd(mpt, cmd) == TRUE) {
8660 				mptsas_waitq_delete(mpt, cmd);
8661 				(void) mptsas_start_cmd(mpt, cmd);
8662 			}
8663 		}
8664 		cmd = next_cmd;
8665 	}
8666 }
8667 /*
8668  * Cmds are queued if tran_start() doesn't get the m_mutex lock (no wait).
8669  * Accept all those queued cmds before a new cmd is accepted so that the
8670  * cmds are sent in order.
8671  */
8672 static void
8673 mptsas_accept_tx_waitq(mptsas_t *mpt)
8674 {
8675 	mptsas_cmd_t *cmd;
8676 
8677 	ASSERT(mutex_owned(&mpt->m_mutex));
8678 	ASSERT(mutex_owned(&mpt->m_tx_waitq_mutex));
8679 
8680 	/*
8681 	 * A Bus Reset could occur at any time and flush the tx_waitq,
8682 	 * so we cannot count on the tx_waitq to contain even one cmd.
8683 	 * And when the m_tx_waitq_mutex is released and run
8684 	 * mptsas_accept_pkt(), the tx_waitq may be flushed.
8685 	 */
8686 	cmd = mpt->m_tx_waitq;
8687 	for (;;) {
8688 		if ((cmd = mpt->m_tx_waitq) == NULL) {
8689 			mpt->m_tx_draining = 0;
8690 			break;
8691 		}
8692 		if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL) {
8693 			mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
8694 		}
8695 		cmd->cmd_linkp = NULL;
8696 		mutex_exit(&mpt->m_tx_waitq_mutex);
8697 		if (mptsas_accept_pkt(mpt, cmd) != TRAN_ACCEPT)
8698 			cmn_err(CE_WARN, "mpt: mptsas_accept_tx_waitq: failed "
8699 			    "to accept cmd on queue\n");
8700 		mutex_enter(&mpt->m_tx_waitq_mutex);
8701 	}
8702 }
8703 
8704 
8705 /*
8706  * mpt tag type lookup
8707  */
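/*
 * The table below is indexed by (cmd_pkt_flags & FLAG_TAGMASK) >> 12, so the
 * FLAG_HTAG, FLAG_OTAG and FLAG_STAG packet flags map to the head-of-queue,
 * ordered and simple queue tag message types respectively.
 */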
8708 static char mptsas_tag_lookup[] =
8709 	{0, MSG_HEAD_QTAG, MSG_ORDERED_QTAG, 0, MSG_SIMPLE_QTAG};
8710 
8711 static int
8712 mptsas_start_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
8713 {
8714 	struct scsi_pkt		*pkt = CMD2PKT(cmd);
8715 	uint32_t		control = 0;
8716 	caddr_t			mem, arsbuf;
8717 	pMpi2SCSIIORequest_t	io_request;
8718 	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
8719 	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;
8720 	mptsas_target_t		*ptgt = cmd->cmd_tgt_addr;
8721 	uint16_t		SMID, io_flags = 0;
8722 	uint8_t			ars_size;
8723 	uint64_t		request_desc;
8724 	uint32_t		ars_dmaaddrlow;
8725 	mptsas_cmd_t		*c;
8726 
8727 	NDBG1(("mptsas_start_cmd: cmd=0x%p, flags 0x%x", (void *)cmd,
8728 	    cmd->cmd_flags));
8729 
8730 	/*
8731 	 * The SMID is the command's slot number.  0 is an invalid SMID, so
8732 	 * slot (and therefore SMID) numbering starts at 1.
8733 	 */
8734 	SMID = cmd->cmd_slot;
8735 
8736 	/*
8737 	 * It is possible for back-to-back device resets to
8738 	 * happen before the reset delay has expired.  That's
8739 	 * ok; just let the device reset go out on the bus.
8740 	 */
8741 	if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
8742 		ASSERT(ptgt->m_reset_delay == 0);
8743 	}
8744 
8745 	/*
8746 	 * if a non-tagged cmd is submitted to an active tagged target
8747 	 * then drain before submitting this cmd; SCSI-2 allows RQSENSE
8748 	 * to be untagged
8749 	 */
8750 	if (((cmd->cmd_pkt_flags & FLAG_TAGMASK) == 0) &&
8751 	    (ptgt->m_t_ncmds > 1) &&
8752 	    ((cmd->cmd_flags & CFLAG_TM_CMD) == 0) &&
8753 	    (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE)) {
8754 		if ((cmd->cmd_pkt_flags & FLAG_NOINTR) == 0) {
8755 			NDBG23(("target=%d, untagged cmd, start draining\n",
8756 			    ptgt->m_devhdl));
8757 
8758 			if (ptgt->m_reset_delay == 0) {
8759 				mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
8760 			}
8761 
8762 			mptsas_remove_cmd(mpt, cmd);
8763 			cmd->cmd_pkt_flags |= FLAG_HEAD;
8764 			mptsas_waitq_add(mpt, cmd);
8765 		}
8766 		return (DDI_FAILURE);
8767 	}
8768 
8769 	/*
8770 	 * Set correct tag bits.
8771 	 */
8772 	if (cmd->cmd_pkt_flags & FLAG_TAGMASK) {
8773 		switch (mptsas_tag_lookup[((cmd->cmd_pkt_flags &
8774 		    FLAG_TAGMASK) >> 12)]) {
8775 		case MSG_SIMPLE_QTAG:
8776 			control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
8777 			break;
8778 		case MSG_HEAD_QTAG:
8779 			control |= MPI2_SCSIIO_CONTROL_HEADOFQ;
8780 			break;
8781 		case MSG_ORDERED_QTAG:
8782 			control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
8783 			break;
8784 		default:
8785 			mptsas_log(mpt, CE_WARN, "mpt: Invalid tag type\n");
8786 			break;
8787 		}
8788 	} else {
8789 		if (*(cmd->cmd_pkt->pkt_cdbp) != SCMD_REQUEST_SENSE) {
8790 				ptgt->m_t_throttle = 1;
8791 		}
8792 		control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
8793 	}
8794 
8795 	if (cmd->cmd_pkt_flags & FLAG_TLR) {
8796 		control |= MPI2_SCSIIO_CONTROL_TLR_ON;
8797 	}
8798 
8799 	mem = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
8800 	io_request = (pMpi2SCSIIORequest_t)mem;
8801 	if (cmd->cmd_extrqslen != 0) {
8802 		/*
8803 		 * Mapping of the buffer was done in mptsas_pkt_alloc_extern().
8804 		 * Calculate the DMA address with the same offset.
8805 		 */
8806 		arsbuf = cmd->cmd_arq_buf;
8807 		ars_size = cmd->cmd_extrqslen;
8808 		ars_dmaaddrlow = (mpt->m_req_sense_dma_addr +
8809 		    ((uintptr_t)arsbuf - (uintptr_t)mpt->m_req_sense)) &
8810 		    0xffffffffu;
8811 	} else {
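		/*
		 * No external request sense buffer; use the pre-allocated
		 * sense slot for this SMID in the shared request sense area.
		 */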
8812 		arsbuf = mpt->m_req_sense + (mpt->m_req_sense_size * (SMID-1));
8813 		cmd->cmd_arq_buf = arsbuf;
8814 		ars_size = mpt->m_req_sense_size;
8815 		ars_dmaaddrlow = (mpt->m_req_sense_dma_addr +
8816 		    (mpt->m_req_sense_size * (SMID-1))) &
8817 		    0xffffffffu;
8818 	}
8819 	bzero(io_request, sizeof (Mpi2SCSIIORequest_t));
8820 	bzero(arsbuf, ars_size);
8821 
8822 	ddi_put8(acc_hdl, &io_request->SGLOffset0, offsetof
8823 	    (MPI2_SCSI_IO_REQUEST, SGL) / 4);
8824 	mptsas_init_std_hdr(acc_hdl, io_request, ptgt->m_devhdl, Lun(cmd), 0,
8825 	    MPI2_FUNCTION_SCSI_IO_REQUEST);
8826 
8827 	(void) ddi_rep_put8(acc_hdl, (uint8_t *)pkt->pkt_cdbp,
8828 	    io_request->CDB.CDB32, cmd->cmd_cdblen, DDI_DEV_AUTOINCR);
8829 
8830 	io_flags = cmd->cmd_cdblen;
8831 	if (mptsas_use_fastpath &&
8832 	    ptgt->m_io_flags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) {
8833 		io_flags |= MPI25_SCSIIO_IOFLAGS_FAST_PATH;
8834 		request_desc = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
8835 	} else {
8836 		request_desc = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
8837 	}
8838 	ddi_put16(acc_hdl, &io_request->IoFlags, io_flags);
8839 	/*
8840 	 * setup the Scatter/Gather DMA list for this request
8841 	 */
8842 	if (cmd->cmd_cookiec > 0) {
8843 		mptsas_sge_setup(mpt, cmd, &control, io_request, acc_hdl);
8844 	} else {
8845 		ddi_put32(acc_hdl, &io_request->SGL.MpiSimple.FlagsLength,
8846 		    ((uint32_t)MPI2_SGE_FLAGS_LAST_ELEMENT |
8847 		    MPI2_SGE_FLAGS_END_OF_BUFFER |
8848 		    MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
8849 		    MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT);
8850 	}
8851 
8852 	/*
8853 	 * save ARQ information
8854 	 */
8855 	ddi_put8(acc_hdl, &io_request->SenseBufferLength, ars_size);
8856 	ddi_put32(acc_hdl, &io_request->SenseBufferLowAddress, ars_dmaaddrlow);
8857 
8858 	ddi_put32(acc_hdl, &io_request->Control, control);
8859 
8860 	NDBG31(("starting message=%d(0x%p), with cmd=0x%p",
8861 	    SMID, (void *)io_request, (void *)cmd));
8862 
8863 	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
8864 	(void) ddi_dma_sync(mpt->m_dma_req_sense_hdl, 0, 0,
8865 	    DDI_DMA_SYNC_FORDEV);
8866 
8867 	/*
8868 	 * Build request descriptor and write it to the request desc post reg.
8869 	 */
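	/*
	 * SCSI IO request descriptor layout: request flags in the low byte,
	 * SMID in bits 16-31 and the target DevHandle in bits 48-63.
	 */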
8870 	request_desc |= (SMID << 16);
8871 	request_desc |= (uint64_t)ptgt->m_devhdl << 48;
8872 	MPTSAS_START_CMD(mpt, request_desc);
8873 
8874 	/*
8875 	 * Start timeout.
8876 	 */
8877 	cmd->cmd_active_expiration =
8878 	    gethrtime() + (hrtime_t)pkt->pkt_time * NANOSEC;
8879 #ifdef MPTSAS_TEST
8880 	/*
8881 	 * Force timeouts to happen immediately.
8882 	 */
8883 	if (mptsas_test_timeouts)
8884 		cmd->cmd_active_expiration = gethrtime();
8885 #endif
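	/*
	 * Link the command into the per-target active queue, which is kept
	 * sorted by expiration time: the latest expiration at the head and
	 * the earliest at the tail, so the watchdog can look at the tail to
	 * find the command that will time out first.
	 */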
8886 	c = TAILQ_FIRST(&ptgt->m_active_cmdq);
8887 	if (c == NULL ||
8888 	    c->cmd_active_expiration < cmd->cmd_active_expiration) {
8889 		/*
8890 		 * Common case is that this is the last pending expiration
8891 		 * (or queue is empty). Insert at head of the queue.
8892 		 */
8893 		TAILQ_INSERT_HEAD(&ptgt->m_active_cmdq, cmd, cmd_active_link);
8894 	} else {
8895 		/*
8896 		 * Queue is not empty and first element expires later than
8897 		 * this command. Search for element expiring sooner.
8898 		 */
8899 		while ((c = TAILQ_NEXT(c, cmd_active_link)) != NULL) {
8900 			if (c->cmd_active_expiration <
8901 			    cmd->cmd_active_expiration) {
8902 				TAILQ_INSERT_BEFORE(c, cmd, cmd_active_link);
8903 				break;
8904 			}
8905 		}
8906 		if (c == NULL) {
8907 			/*
8908 			 * No element found expiring sooner, append to
8909 			 * non-empty queue.
8910 			 */
8911 			TAILQ_INSERT_TAIL(&ptgt->m_active_cmdq, cmd,
8912 			    cmd_active_link);
8913 		}
8914 	}
8915 
8916 	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
8917 	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
8918 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
8919 		return (DDI_FAILURE);
8920 	}
8921 	return (DDI_SUCCESS);
8922 }
8923 
8924 /*
8925  * Select a helper thread to handle current doneq
8926  */
8927 static void
8928 mptsas_deliver_doneq_thread(mptsas_t *mpt)
8929 {
8930 	uint64_t			t, i;
8931 	uint32_t			min = 0xffffffff;
8932 	mptsas_doneq_thread_list_t	*item;
8933 
8934 	for (i = 0; i < mpt->m_doneq_thread_n; i++) {
8935 		item = &mpt->m_doneq_thread_id[i];
8936 		/*
8937 		 * If the number of completed commands queued on helper
8938 		 * thread[i] is below doneq_thread_threshold, pick thread[i].
8939 		 * Otherwise pick the thread with the fewest queued commands.
8940 		 */
8941 
8942 		mutex_enter(&item->mutex);
8943 		if (item->len < mpt->m_doneq_thread_threshold) {
8944 			t = i;
8945 			mutex_exit(&item->mutex);
8946 			break;
8947 		}
8948 		if (item->len < min) {
8949 			min = item->len;
8950 			t = i;
8951 		}
8952 		mutex_exit(&item->mutex);
8953 	}
8954 	mutex_enter(&mpt->m_doneq_thread_id[t].mutex);
8955 	mptsas_doneq_mv(mpt, t);
8956 	cv_signal(&mpt->m_doneq_thread_id[t].cv);
8957 	mutex_exit(&mpt->m_doneq_thread_id[t].mutex);
8958 }
8959 
8960 /*
8961  * move the current global doneq to the doneq of thread[t]
8962  */
8963 static void
8964 mptsas_doneq_mv(mptsas_t *mpt, uint64_t t)
8965 {
8966 	mptsas_cmd_t			*cmd;
8967 	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];
8968 
8969 	ASSERT(mutex_owned(&item->mutex));
8970 	while ((cmd = mpt->m_doneq) != NULL) {
8971 		if ((mpt->m_doneq = cmd->cmd_linkp) == NULL) {
8972 			mpt->m_donetail = &mpt->m_doneq;
8973 		}
8974 		cmd->cmd_linkp = NULL;
8975 		*item->donetail = cmd;
8976 		item->donetail = &cmd->cmd_linkp;
8977 		mpt->m_doneq_len--;
8978 		item->len++;
8979 	}
8980 }
8981 
8982 void
8983 mptsas_fma_check(mptsas_t *mpt, mptsas_cmd_t *cmd)
8984 {
8985 	struct scsi_pkt	*pkt = CMD2PKT(cmd);
8986 
8987 	/* Check all acc and dma handles */
8988 	if ((mptsas_check_acc_handle(mpt->m_datap) !=
8989 	    DDI_SUCCESS) ||
8990 	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
8991 	    DDI_SUCCESS) ||
8992 	    (mptsas_check_acc_handle(mpt->m_acc_req_sense_hdl) !=
8993 	    DDI_SUCCESS) ||
8994 	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
8995 	    DDI_SUCCESS) ||
8996 	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
8997 	    DDI_SUCCESS) ||
8998 	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
8999 	    DDI_SUCCESS) ||
9000 	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
9001 	    DDI_SUCCESS) ||
9002 	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
9003 	    DDI_SUCCESS)) {
9004 		ddi_fm_service_impact(mpt->m_dip,
9005 		    DDI_SERVICE_UNAFFECTED);
9006 		ddi_fm_acc_err_clear(mpt->m_config_handle,
9007 		    DDI_FME_VER0);
9008 		pkt->pkt_reason = CMD_TRAN_ERR;
9009 		pkt->pkt_statistics = 0;
9010 	}
9011 	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
9012 	    DDI_SUCCESS) ||
9013 	    (mptsas_check_dma_handle(mpt->m_dma_req_sense_hdl) !=
9014 	    DDI_SUCCESS) ||
9015 	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
9016 	    DDI_SUCCESS) ||
9017 	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
9018 	    DDI_SUCCESS) ||
9019 	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
9020 	    DDI_SUCCESS) ||
9021 	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
9022 	    DDI_SUCCESS)) {
9023 		ddi_fm_service_impact(mpt->m_dip,
9024 		    DDI_SERVICE_UNAFFECTED);
9025 		pkt->pkt_reason = CMD_TRAN_ERR;
9026 		pkt->pkt_statistics = 0;
9027 	}
9028 	if (cmd->cmd_dmahandle &&
9029 	    (mptsas_check_dma_handle(cmd->cmd_dmahandle) != DDI_SUCCESS)) {
9030 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
9031 		pkt->pkt_reason = CMD_TRAN_ERR;
9032 		pkt->pkt_statistics = 0;
9033 	}
9034 	if ((cmd->cmd_extra_frames &&
9035 	    ((mptsas_check_dma_handle(cmd->cmd_extra_frames->m_dma_hdl) !=
9036 	    DDI_SUCCESS) ||
9037 	    (mptsas_check_acc_handle(cmd->cmd_extra_frames->m_acc_hdl) !=
9038 	    DDI_SUCCESS)))) {
9039 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
9040 		pkt->pkt_reason = CMD_TRAN_ERR;
9041 		pkt->pkt_statistics = 0;
9042 	}
9043 }
9044 
9045 /*
9046  * These routines manipulate the queue of commands that
9047  * are waiting for their completion routines to be called.
9048  * The queue is usually in FIFO order but on an MP system
9049  * it's possible for the completion routines to get out
9050  * of order. If that's a problem you need to add a global
9051  * mutex around the code that calls the completion routine
9052  * in the interrupt handler.
9053  */
9054 static void
9055 mptsas_doneq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
9056 {
9057 	struct scsi_pkt	*pkt = CMD2PKT(cmd);
9058 
9059 	NDBG31(("mptsas_doneq_add: cmd=0x%p", (void *)cmd));
9060 
9061 	ASSERT((cmd->cmd_flags & CFLAG_COMPLETED) == 0);
9062 	cmd->cmd_linkp = NULL;
9063 	cmd->cmd_flags |= CFLAG_FINISHED;
9064 	cmd->cmd_flags &= ~CFLAG_IN_TRANSPORT;
9065 
9066 	mptsas_fma_check(mpt, cmd);
9067 
9068 	/*
9069 	 * Only add scsi pkts that have completion routines to
9070 	 * the doneq; no-intr cmds do not have callbacks.
9071 	 */
9072 	if (pkt && (pkt->pkt_comp)) {
9073 		*mpt->m_donetail = cmd;
9074 		mpt->m_donetail = &cmd->cmd_linkp;
9075 		mpt->m_doneq_len++;
9076 	}
9077 }
9078 
9079 static mptsas_cmd_t *
9080 mptsas_doneq_thread_rm(mptsas_t *mpt, uint64_t t)
9081 {
9082 	mptsas_cmd_t			*cmd;
9083 	mptsas_doneq_thread_list_t	*item = &mpt->m_doneq_thread_id[t];
9084 
9085 	/* pop one off the done queue */
9086 	if ((cmd = item->doneq) != NULL) {
9087 		/* if the queue is now empty fix the tail pointer */
9088 		NDBG31(("mptsas_doneq_thread_rm: cmd=0x%p", (void *)cmd));
9089 		if ((item->doneq = cmd->cmd_linkp) == NULL) {
9090 			item->donetail = &item->doneq;
9091 		}
9092 		cmd->cmd_linkp = NULL;
9093 		item->len--;
9094 	}
9095 	return (cmd);
9096 }
9097 
9098 static void
9099 mptsas_doneq_empty(mptsas_t *mpt)
9100 {
9101 	if (mpt->m_doneq && !mpt->m_in_callback) {
9102 		mptsas_cmd_t	*cmd, *next;
9103 		struct scsi_pkt *pkt;
9104 
9105 		mpt->m_in_callback = 1;
9106 		cmd = mpt->m_doneq;
9107 		mpt->m_doneq = NULL;
9108 		mpt->m_donetail = &mpt->m_doneq;
9109 		mpt->m_doneq_len = 0;
9110 
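		/*
		 * Drop m_mutex while the completion routines run so that
		 * target drivers may call back into this driver safely.
		 */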
9111 		mutex_exit(&mpt->m_mutex);
9112 		/*
9113 		 * run the completion routines of all the
9114 		 * completed commands
9115 		 */
9116 		while (cmd != NULL) {
9117 			next = cmd->cmd_linkp;
9118 			cmd->cmd_linkp = NULL;
9119 			/* run this command's completion routine */
9120 			cmd->cmd_flags |= CFLAG_COMPLETED;
9121 			pkt = CMD2PKT(cmd);
9122 			mptsas_pkt_comp(pkt, cmd);
9123 			cmd = next;
9124 		}
9125 		mutex_enter(&mpt->m_mutex);
9126 		mpt->m_in_callback = 0;
9127 	}
9128 }
9129 
9130 /*
9131  * These routines manipulate the target's queue of pending requests
9132  */
9133 void
9134 mptsas_waitq_add(mptsas_t *mpt, mptsas_cmd_t *cmd)
9135 {
9136 	NDBG7(("mptsas_waitq_add: cmd=0x%p", (void *)cmd));
9137 	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
9138 	cmd->cmd_queued = TRUE;
9139 	if (ptgt)
9140 		ptgt->m_t_nwait++;
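	/*
	 * FLAG_HEAD commands (e.g. commands requeued from mptsas_start_cmd)
	 * go to the front of the wait queue; everything else is appended
	 * at the tail.
	 */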
9141 	if (cmd->cmd_pkt_flags & FLAG_HEAD) {
9142 		if ((cmd->cmd_linkp = mpt->m_waitq) == NULL) {
9143 			mpt->m_waitqtail = &cmd->cmd_linkp;
9144 		}
9145 		mpt->m_waitq = cmd;
9146 	} else {
9147 		cmd->cmd_linkp = NULL;
9148 		*(mpt->m_waitqtail) = cmd;
9149 		mpt->m_waitqtail = &cmd->cmd_linkp;
9150 	}
9151 }
9152 
9153 static mptsas_cmd_t *
9154 mptsas_waitq_rm(mptsas_t *mpt)
9155 {
9156 	mptsas_cmd_t	*cmd;
9157 	mptsas_target_t *ptgt;
9158 	NDBG7(("mptsas_waitq_rm"));
9159 
9160 	MPTSAS_WAITQ_RM(mpt, cmd);
9161 
9162 	NDBG7(("mptsas_waitq_rm: cmd=0x%p", (void *)cmd));
9163 	if (cmd) {
9164 		ptgt = cmd->cmd_tgt_addr;
9165 		if (ptgt) {
9166 			ptgt->m_t_nwait--;
9167 			ASSERT(ptgt->m_t_nwait >= 0);
9168 		}
9169 	}
9170 	return (cmd);
9171 }
9172 
9173 /*
9174  * remove specified cmd from the middle of the wait queue.
9175  */
9176 static void
9177 mptsas_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
9178 {
9179 	mptsas_cmd_t	*prevp = mpt->m_waitq;
9180 	mptsas_target_t *ptgt = cmd->cmd_tgt_addr;
9181 
9182 	NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
9183 	    (void *)mpt, (void *)cmd));
9184 	if (ptgt) {
9185 		ptgt->m_t_nwait--;
9186 		ASSERT(ptgt->m_t_nwait >= 0);
9187 	}
9188 
9189 	if (prevp == cmd) {
9190 		if ((mpt->m_waitq = cmd->cmd_linkp) == NULL)
9191 			mpt->m_waitqtail = &mpt->m_waitq;
9192 
9193 		cmd->cmd_linkp = NULL;
9194 		cmd->cmd_queued = FALSE;
9195 		NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
9196 		    (void *)mpt, (void *)cmd));
9197 		return;
9198 	}
9199 
9200 	while (prevp != NULL) {
9201 		if (prevp->cmd_linkp == cmd) {
9202 			if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
9203 				mpt->m_waitqtail = &prevp->cmd_linkp;
9204 
9205 			cmd->cmd_linkp = NULL;
9206 			cmd->cmd_queued = FALSE;
9207 			NDBG7(("mptsas_waitq_delete: mpt=0x%p cmd=0x%p",
9208 			    (void *)mpt, (void *)cmd));
9209 			return;
9210 		}
9211 		prevp = prevp->cmd_linkp;
9212 	}
9213 	cmn_err(CE_PANIC, "mpt: mptsas_waitq_delete: queue botch");
9214 }
9215 
9216 static mptsas_cmd_t *
9217 mptsas_tx_waitq_rm(mptsas_t *mpt)
9218 {
9219 	mptsas_cmd_t *cmd;
9220 	NDBG7(("mptsas_tx_waitq_rm"));
9221 
9222 	MPTSAS_TX_WAITQ_RM(mpt, cmd);
9223 
9224 	NDBG7(("mptsas_tx_waitq_rm: cmd=0x%p", (void *)cmd));
9225 
9226 	return (cmd);
9227 }
9228 
9229 /*
9230  * remove specified cmd from the middle of the tx_waitq.
9231  */
9232 static void
9233 mptsas_tx_waitq_delete(mptsas_t *mpt, mptsas_cmd_t *cmd)
9234 {
9235 	mptsas_cmd_t *prevp = mpt->m_tx_waitq;
9236 
9237 	NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
9238 	    (void *)mpt, (void *)cmd));
9239 
9240 	if (prevp == cmd) {
9241 		if ((mpt->m_tx_waitq = cmd->cmd_linkp) == NULL)
9242 			mpt->m_tx_waitqtail = &mpt->m_tx_waitq;
9243 
9244 		cmd->cmd_linkp = NULL;
9245 		cmd->cmd_queued = FALSE;
9246 		NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
9247 		    (void *)mpt, (void *)cmd));
9248 		return;
9249 	}
9250 
9251 	while (prevp != NULL) {
9252 		if (prevp->cmd_linkp == cmd) {
9253 			if ((prevp->cmd_linkp = cmd->cmd_linkp) == NULL)
9254 				mpt->m_tx_waitqtail = &prevp->cmd_linkp;
9255 
9256 			cmd->cmd_linkp = NULL;
9257 			cmd->cmd_queued = FALSE;
9258 			NDBG7(("mptsas_tx_waitq_delete: mpt=0x%p cmd=0x%p",
9259 			    (void *)mpt, (void *)cmd));
9260 			return;
9261 		}
9262 		prevp = prevp->cmd_linkp;
9263 	}
9264 	cmn_err(CE_PANIC, "mpt: mptsas_tx_waitq_delete: queue botch");
9265 }
9266 
9267 /*
9268  * device and bus reset handling
9269  *
9270  * Notes:
9271  *	- RESET_ALL:	reset the controller
9272  *	- RESET_TARGET:	reset the target specified in scsi_address
9273  */
9274 static int
9275 mptsas_scsi_reset(struct scsi_address *ap, int level)
9276 {
9277 	mptsas_t		*mpt = ADDR2MPT(ap);
9278 	int			rval;
9279 	mptsas_tgt_private_t	*tgt_private;
9280 	mptsas_target_t		*ptgt = NULL;
9281 
9282 	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->tran_tgt_private;
9283 	ptgt = tgt_private->t_private;
9284 	if (ptgt == NULL) {
9285 		return (FALSE);
9286 	}
9287 	NDBG22(("mptsas_scsi_reset: target=%d level=%d", ptgt->m_devhdl,
9288 	    level));
9289 
9290 	mutex_enter(&mpt->m_mutex);
9291 	/*
9292 	 * if we are not in panic set up a reset delay for this target
9293 	 */
9294 	if (!ddi_in_panic()) {
9295 		mptsas_setup_bus_reset_delay(mpt);
9296 	} else {
9297 		drv_usecwait(mpt->m_scsi_reset_delay * 1000);
9298 	}
9299 	rval = mptsas_do_scsi_reset(mpt, ptgt->m_devhdl);
9300 	mutex_exit(&mpt->m_mutex);
9301 
9302 	/*
9303 	 * The transport layer expects to see only TRUE and
9304 	 * FALSE. Therefore, we will adjust the return value
9305 	 * if mptsas_do_scsi_reset returns FAILED.
9306 	 */
9307 	if (rval == FAILED)
9308 		rval = FALSE;
9309 	return (rval);
9310 }
9311 
9312 static int
9313 mptsas_do_scsi_reset(mptsas_t *mpt, uint16_t devhdl)
9314 {
9315 	int		rval = FALSE;
9316 	uint8_t		config, disk;
9317 
9318 	ASSERT(mutex_owned(&mpt->m_mutex));
9319 
9320 	if (mptsas_debug_resets) {
9321 		mptsas_log(mpt, CE_WARN, "mptsas_do_scsi_reset: target=%d",
9322 		    devhdl);
9323 	}
9324 
9325 	/*
9326 	 * Issue a Target Reset message to the target specified but not to a
9327 	 * disk making up a raid volume.  Just look through the RAID config
9328 	 * Phys Disk list of DevHandles.  If the target's DevHandle is in this
9329 	 * list, then don't reset this target.
9330 	 */
9331 	for (config = 0; config < mpt->m_num_raid_configs; config++) {
9332 		for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
9333 			if (devhdl == mpt->m_raidconfig[config].
9334 			    m_physdisk_devhdl[disk]) {
9335 				return (TRUE);
9336 			}
9337 		}
9338 	}
9339 
9340 	rval = mptsas_ioc_task_management(mpt,
9341 	    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, devhdl, 0, NULL, 0, 0);
9342 
9343 	mptsas_doneq_empty(mpt);
9344 	return (rval);
9345 }
9346 
9347 static int
9348 mptsas_scsi_reset_notify(struct scsi_address *ap, int flag,
9349     void (*callback)(caddr_t), caddr_t arg)
9350 {
9351 	mptsas_t	*mpt = ADDR2MPT(ap);
9352 
9353 	NDBG22(("mptsas_scsi_reset_notify: tgt=%d", ap->a_target));
9354 
9355 	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
9356 	    &mpt->m_mutex, &mpt->m_reset_notify_listf));
9357 }
9358 
9359 static int
9360 mptsas_get_name(struct scsi_device *sd, char *name, int len)
9361 {
9362 	dev_info_t	*lun_dip = NULL;
9363 
9364 	ASSERT(sd != NULL);
9365 	ASSERT(name != NULL);
9366 	lun_dip = sd->sd_dev;
9367 	ASSERT(lun_dip != NULL);
9368 
9369 	if (mptsas_name_child(lun_dip, name, len) == DDI_SUCCESS) {
9370 		return (1);
9371 	} else {
9372 		return (0);
9373 	}
9374 }
9375 
9376 static int
9377 mptsas_get_bus_addr(struct scsi_device *sd, char *name, int len)
9378 {
9379 	return (mptsas_get_name(sd, name, len));
9380 }
9381 
9382 void
9383 mptsas_set_throttle(mptsas_t *mpt, mptsas_target_t *ptgt, int what)
9384 {
9385 
9386 	NDBG25(("mptsas_set_throttle: throttle=%x", what));
9387 
9388 	/*
9389 	 * if the bus is draining/quiesced, no changes to the throttles
9390 	 * are allowed.  Disallowing throttle changes during draining
9391 	 * limits error recovery but reduces draining time.
9392 	 *
9393 	 * All throttles should already have been set to HOLD_THROTTLE.
9394 	 */
9395 	if (mpt->m_softstate & (MPTSAS_SS_QUIESCED | MPTSAS_SS_DRAINING)) {
9396 		return;
9397 	}
9398 
9399 	if (what == HOLD_THROTTLE) {
9400 		ptgt->m_t_throttle = HOLD_THROTTLE;
9401 	} else if (ptgt->m_reset_delay == 0) {
9402 		ptgt->m_t_throttle = what;
9403 	}
9404 }
9405 
9406 /*
9407  * Clean up from a device reset.
9408  * For the case of target reset, this function clears the waitq of all
9409  * commands for a particular target.  For the case of abort task set, this
9410  * function clears the waitq of all commands for a particular target/lun.
9411  */
9412 static void
9413 mptsas_flush_target(mptsas_t *mpt, ushort_t target, int lun, uint8_t tasktype)
9414 {
9415 	mptsas_slots_t	*slots = mpt->m_active;
9416 	mptsas_cmd_t	*cmd, *next_cmd;
9417 	int		slot;
9418 	uchar_t		reason;
9419 	uint_t		stat;
9420 	hrtime_t	timestamp;
9421 
9422 	NDBG25(("mptsas_flush_target: target=%d lun=%d", target, lun));
9423 
9424 	timestamp = gethrtime();
9425 
9426 	/*
9427 	 * Make sure the I/O Controller has flushed all cmds
9428 	 * that are associated with this target for a target reset
9429 	 * and target/lun for abort task set.
9430 	 * Account for TM requests, which use the last SMID.
9431 	 */
9432 	for (slot = 0; slot <= mpt->m_active->m_n_normal; slot++) {
9433 		if ((cmd = slots->m_slot[slot]) == NULL)
9434 			continue;
9435 		reason = CMD_RESET;
9436 		stat = STAT_DEV_RESET;
9437 		switch (tasktype) {
9438 		case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
9439 			if (Tgt(cmd) == target) {
9440 				if (cmd->cmd_active_expiration <= timestamp) {
9441 					/*
9442 					 * When timeout requested, propagate
9443 					 * proper reason and statistics to
9444 					 * target drivers.
9445 					 */
9446 					reason = CMD_TIMEOUT;
9447 					stat |= STAT_TIMEOUT;
9448 				}
9449 				NDBG25(("mptsas_flush_target discovered non-"
9450 				    "NULL cmd in slot %d, tasktype 0x%x", slot,
9451 				    tasktype));
9452 				mptsas_dump_cmd(mpt, cmd);
9453 				mptsas_remove_cmd(mpt, cmd);
9454 				mptsas_set_pkt_reason(mpt, cmd, reason, stat);
9455 				mptsas_doneq_add(mpt, cmd);
9456 			}
9457 			break;
9458 		case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
9459 			reason = CMD_ABORTED;
9460 			stat = STAT_ABORTED;
9461 			/*FALLTHROUGH*/
9462 		case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
9463 			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
9464 
9465 				NDBG25(("mptsas_flush_target discovered non-"
9466 				    "NULL cmd in slot %d, tasktype 0x%x", slot,
9467 				    tasktype));
9468 				mptsas_dump_cmd(mpt, cmd);
9469 				mptsas_remove_cmd(mpt, cmd);
9470 				mptsas_set_pkt_reason(mpt, cmd, reason,
9471 				    stat);
9472 				mptsas_doneq_add(mpt, cmd);
9473 			}
9474 			break;
9475 		default:
9476 			break;
9477 		}
9478 	}
9479 
9480 	/*
9481 	 * Flush the waitq and tx_waitq of this target's cmds
9482 	 */
9483 	cmd = mpt->m_waitq;
9484 
9485 	reason = CMD_RESET;
9486 	stat = STAT_DEV_RESET;
9487 
9488 	switch (tasktype) {
9489 	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
9490 		while (cmd != NULL) {
9491 			next_cmd = cmd->cmd_linkp;
9492 			if (Tgt(cmd) == target) {
9493 				mptsas_waitq_delete(mpt, cmd);
9494 				mptsas_set_pkt_reason(mpt, cmd,
9495 				    reason, stat);
9496 				mptsas_doneq_add(mpt, cmd);
9497 			}
9498 			cmd = next_cmd;
9499 		}
9500 		mutex_enter(&mpt->m_tx_waitq_mutex);
9501 		cmd = mpt->m_tx_waitq;
9502 		while (cmd != NULL) {
9503 			next_cmd = cmd->cmd_linkp;
9504 			if (Tgt(cmd) == target) {
9505 				mptsas_tx_waitq_delete(mpt, cmd);
9506 				mutex_exit(&mpt->m_tx_waitq_mutex);
9507 				mptsas_set_pkt_reason(mpt, cmd,
9508 				    reason, stat);
9509 				mptsas_doneq_add(mpt, cmd);
9510 				mutex_enter(&mpt->m_tx_waitq_mutex);
9511 			}
9512 			cmd = next_cmd;
9513 		}
9514 		mutex_exit(&mpt->m_tx_waitq_mutex);
9515 		break;
9516 	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
9517 		reason = CMD_ABORTED;
9518 		stat =  STAT_ABORTED;
9519 		/*FALLTHROUGH*/
9520 	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
9521 		while (cmd != NULL) {
9522 			next_cmd = cmd->cmd_linkp;
9523 			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
9524 				mptsas_waitq_delete(mpt, cmd);
9525 				mptsas_set_pkt_reason(mpt, cmd,
9526 				    reason, stat);
9527 				mptsas_doneq_add(mpt, cmd);
9528 			}
9529 			cmd = next_cmd;
9530 		}
9531 		mutex_enter(&mpt->m_tx_waitq_mutex);
9532 		cmd = mpt->m_tx_waitq;
9533 		while (cmd != NULL) {
9534 			next_cmd = cmd->cmd_linkp;
9535 			if ((Tgt(cmd) == target) && (Lun(cmd) == lun)) {
9536 				mptsas_tx_waitq_delete(mpt, cmd);
9537 				mutex_exit(&mpt->m_tx_waitq_mutex);
9538 				mptsas_set_pkt_reason(mpt, cmd,
9539 				    reason, stat);
9540 				mptsas_doneq_add(mpt, cmd);
9541 				mutex_enter(&mpt->m_tx_waitq_mutex);
9542 			}
9543 			cmd = next_cmd;
9544 		}
9545 		mutex_exit(&mpt->m_tx_waitq_mutex);
9546 		break;
9547 	default:
9548 		mptsas_log(mpt, CE_WARN, "Unknown task management type %d.",
9549 		    tasktype);
9550 		break;
9551 	}
9552 }
9553 
9554 /*
9555  * Clean up hba state, abort all outstanding commands and commands in the
9556  * waitq, and reset the timeout of all targets.
9557  */
9558 static void
9559 mptsas_flush_hba(mptsas_t *mpt)
9560 {
9561 	mptsas_slots_t	*slots = mpt->m_active;
9562 	mptsas_cmd_t	*cmd;
9563 	int		slot;
9564 
9565 	NDBG25(("mptsas_flush_hba"));
9566 
9567 	/*
9568 	 * The I/O Controller should have already sent back
9569 	 * all commands via the scsi I/O reply frame.  Make
9570 	 * sure all commands have been flushed.
9571 	 * Account for TM requests, which use the last SMID.
9572 	 */
9573 	for (slot = 0; slot <= mpt->m_active->m_n_normal; slot++) {
9574 		if ((cmd = slots->m_slot[slot]) == NULL)
9575 			continue;
9576 
9577 		if (cmd->cmd_flags & CFLAG_CMDIOC) {
9578 			/*
9579 			 * Need to make sure to tell everyone that might be
9580 			 * waiting on this command that it's going to fail.  If
9581 			 * we get here, this command will never timeout because
9582 			 * the active command table is going to be re-allocated,
9583 			 * so there will be nothing to check against a time out.
9584 			 * Instead, mark the command as failed due to reset.
9585 			 */
9586 			mptsas_set_pkt_reason(mpt, cmd, CMD_RESET,
9587 			    STAT_BUS_RESET);
9588 			if ((cmd->cmd_flags &
9589 			    (CFLAG_PASSTHRU | CFLAG_CONFIG | CFLAG_FW_DIAG))) {
9590 				cmd->cmd_flags |= CFLAG_FINISHED;
9591 				cv_broadcast(&mpt->m_passthru_cv);
9592 				cv_broadcast(&mpt->m_config_cv);
9593 				cv_broadcast(&mpt->m_fw_diag_cv);
9594 			}
9595 			continue;
9596 		}
9597 
9598 		NDBG25(("mptsas_flush_hba discovered non-NULL cmd in slot %d",
9599 		    slot));
9600 		mptsas_dump_cmd(mpt, cmd);
9601 
9602 		mptsas_remove_cmd(mpt, cmd);
9603 		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
9604 		mptsas_doneq_add(mpt, cmd);
9605 	}
9606 
9607 	/*
9608 	 * Flush the waitq.
9609 	 */
9610 	while ((cmd = mptsas_waitq_rm(mpt)) != NULL) {
9611 		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
9612 		if ((cmd->cmd_flags & CFLAG_PASSTHRU) ||
9613 		    (cmd->cmd_flags & CFLAG_CONFIG) ||
9614 		    (cmd->cmd_flags & CFLAG_FW_DIAG)) {
9615 			cmd->cmd_flags |= CFLAG_FINISHED;
9616 			cv_broadcast(&mpt->m_passthru_cv);
9617 			cv_broadcast(&mpt->m_config_cv);
9618 			cv_broadcast(&mpt->m_fw_diag_cv);
9619 		} else {
9620 			mptsas_doneq_add(mpt, cmd);
9621 		}
9622 	}
9623 
9624 	/*
9625 	 * Flush the tx_waitq
9626 	 */
9627 	mutex_enter(&mpt->m_tx_waitq_mutex);
9628 	while ((cmd = mptsas_tx_waitq_rm(mpt)) != NULL) {
9629 		mutex_exit(&mpt->m_tx_waitq_mutex);
9630 		mptsas_set_pkt_reason(mpt, cmd, CMD_RESET, STAT_BUS_RESET);
9631 		mptsas_doneq_add(mpt, cmd);
9632 		mutex_enter(&mpt->m_tx_waitq_mutex);
9633 	}
9634 	mutex_exit(&mpt->m_tx_waitq_mutex);
9635 
9636 	/*
9637 	 * Drain the taskqs prior to reallocating resources. The thread
9638 	 * passing through here could be launched from either (dr)
9639 	 * or (event) taskqs so only wait on the 'other' queue since
9640 	 * waiting on 'this' queue is a deadlock condition.
9641 	 */
9642 	mutex_exit(&mpt->m_mutex);
9643 	if (!taskq_member((taskq_t *)mpt->m_event_taskq, curthread))
9644 		ddi_taskq_wait(mpt->m_event_taskq);
9645 	if (!taskq_member((taskq_t *)mpt->m_dr_taskq, curthread))
9646 		ddi_taskq_wait(mpt->m_dr_taskq);
9647 
9648 	mutex_enter(&mpt->m_mutex);
9649 }
9650 
9651 /*
9652  * set pkt_reason and OR in pkt_statistics flag
9653  */
9654 static void
9655 mptsas_set_pkt_reason(mptsas_t *mpt, mptsas_cmd_t *cmd, uchar_t reason,
9656     uint_t stat)
9657 {
9658 #ifndef __lock_lint
9659 	_NOTE(ARGUNUSED(mpt))
9660 #endif
9661 
9662 	NDBG25(("mptsas_set_pkt_reason: cmd=0x%p reason=%x stat=%x",
9663 	    (void *)cmd, reason, stat));
9664 
9665 	if (cmd) {
9666 		if (cmd->cmd_pkt->pkt_reason == CMD_CMPLT) {
9667 			cmd->cmd_pkt->pkt_reason = reason;
9668 		}
9669 		cmd->cmd_pkt->pkt_statistics |= stat;
9670 	}
9671 }
9672 
9673 static void
9674 mptsas_start_watch_reset_delay()
9675 {
9676 	NDBG22(("mptsas_start_watch_reset_delay"));
9677 
9678 	mutex_enter(&mptsas_global_mutex);
9679 	if (mptsas_reset_watch == NULL && mptsas_timeouts_enabled) {
9680 		mptsas_reset_watch = timeout(mptsas_watch_reset_delay, NULL,
9681 		    drv_usectohz((clock_t)
9682 		    MPTSAS_WATCH_RESET_DELAY_TICK * 1000));
9683 		ASSERT(mptsas_reset_watch != NULL);
9684 	}
9685 	mutex_exit(&mptsas_global_mutex);
9686 }
9687 
9688 static void
9689 mptsas_setup_bus_reset_delay(mptsas_t *mpt)
9690 {
9691 	mptsas_target_t	*ptgt = NULL;
9692 
9693 	ASSERT(MUTEX_HELD(&mpt->m_mutex));
9694 
9695 	NDBG22(("mptsas_setup_bus_reset_delay"));
9696 	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
9697 	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
9698 		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
9699 		ptgt->m_reset_delay = mpt->m_scsi_reset_delay;
9700 	}
9701 
9702 	mptsas_start_watch_reset_delay();
9703 }
9704 
9705 /*
9706  * mptsas_watch_reset_delay(_subr) is invoked by timeout() and checks every
9707  * mpt instance for active reset delays
9708  */
9709 static void
9710 mptsas_watch_reset_delay(void *arg)
9711 {
9712 #ifndef __lock_lint
9713 	_NOTE(ARGUNUSED(arg))
9714 #endif
9715 
9716 	mptsas_t	*mpt;
9717 	int		not_done = 0;
9718 
9719 	NDBG22(("mptsas_watch_reset_delay"));
9720 
9721 	mutex_enter(&mptsas_global_mutex);
9722 	mptsas_reset_watch = 0;
9723 	mutex_exit(&mptsas_global_mutex);
9724 	rw_enter(&mptsas_global_rwlock, RW_READER);
9725 	for (mpt = mptsas_head; mpt != NULL; mpt = mpt->m_next) {
9726 		if (mpt->m_tran == 0) {
9727 			continue;
9728 		}
9729 		mutex_enter(&mpt->m_mutex);
9730 		not_done += mptsas_watch_reset_delay_subr(mpt);
9731 		mutex_exit(&mpt->m_mutex);
9732 	}
9733 	rw_exit(&mptsas_global_rwlock);
9734 
9735 	if (not_done) {
9736 		mptsas_start_watch_reset_delay();
9737 	}
9738 }
9739 
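/*
 * Returns non-zero while any target on this instance still has a reset
 * delay pending, so mptsas_watch_reset_delay() knows to rearm the watch.
 */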
9740 static int
9741 mptsas_watch_reset_delay_subr(mptsas_t *mpt)
9742 {
9743 	int		done = 0;
9744 	int		restart = 0;
9745 	mptsas_target_t	*ptgt = NULL;
9746 
9747 	NDBG22(("mptsas_watch_reset_delay_subr: mpt=0x%p", (void *)mpt));
9748 
9749 	ASSERT(mutex_owned(&mpt->m_mutex));
9750 
9751 	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
9752 	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
9753 		if (ptgt->m_reset_delay != 0) {
9754 			ptgt->m_reset_delay -=
9755 			    MPTSAS_WATCH_RESET_DELAY_TICK;
9756 			if (ptgt->m_reset_delay <= 0) {
9757 				ptgt->m_reset_delay = 0;
9758 				mptsas_set_throttle(mpt, ptgt,
9759 				    MAX_THROTTLE);
9760 				restart++;
9761 			} else {
9762 				done = -1;
9763 			}
9764 		}
9765 	}
9766 
9767 	if (restart > 0) {
9768 		mptsas_restart_hba(mpt);
9769 	}
9770 	return (done);
9771 }
9772 
9773 #ifdef MPTSAS_TEST
9774 static void
9775 mptsas_test_reset(mptsas_t *mpt, int target)
9776 {
9777 	mptsas_target_t    *ptgt = NULL;
9778 
9779 	if (mptsas_rtest == target) {
9780 		if (mptsas_do_scsi_reset(mpt, target) == TRUE) {
9781 			mptsas_rtest = -1;
9782 		}
9783 		if (mptsas_rtest == -1) {
9784 			NDBG22(("mptsas_test_reset success"));
9785 		}
9786 	}
9787 }
9788 #endif
9789 
9790 /*
9791  * abort handling:
9792  *
9793  * Notes:
9794  *	- if pkt is not NULL, abort just that command
9795  *	- if pkt is NULL, abort all outstanding commands for target
9796  */
9797 static int
9798 mptsas_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
9799 {
9800 	mptsas_t		*mpt = ADDR2MPT(ap);
9801 	int			rval;
9802 	mptsas_tgt_private_t	*tgt_private;
9803 	int			target, lun;
9804 
9805 	tgt_private = (mptsas_tgt_private_t *)ap->a_hba_tran->
9806 	    tran_tgt_private;
9807 	ASSERT(tgt_private != NULL);
9808 	target = tgt_private->t_private->m_devhdl;
9809 	lun = tgt_private->t_lun;
9810 
9811 	NDBG23(("mptsas_scsi_abort: target=%d.%d", target, lun));
9812 
9813 	mutex_enter(&mpt->m_mutex);
9814 	rval = mptsas_do_scsi_abort(mpt, target, lun, pkt);
9815 	mutex_exit(&mpt->m_mutex);
9816 	return (rval);
9817 }
9818 
9819 static int
9820 mptsas_do_scsi_abort(mptsas_t *mpt, int target, int lun, struct scsi_pkt *pkt)
9821 {
9822 	mptsas_cmd_t	*sp = NULL;
9823 	mptsas_slots_t	*slots = mpt->m_active;
9824 	int		rval = FALSE;
9825 
9826 	ASSERT(mutex_owned(&mpt->m_mutex));
9827 
9828 	/*
9829 	 * Abort the command pkt on the target/lun in ap.  If pkt is
9830 	 * NULL, abort all outstanding commands on that target/lun.
9831 	 * If you can abort them, return 1, else return 0.
9832 	 * Each packet that's aborted should be sent back to the target
9833 	 * driver through the callback routine, with pkt_reason set to
9834 	 * CMD_ABORTED.
9835 	 *
9836 	 * abort cmd pkt on HBA hardware; clean it out of the outstanding
9837 	 * command lists, etc.
9838 	 */
9839 	if (pkt != NULL) {
9840 		/* abort the specified packet */
9841 		sp = PKT2CMD(pkt);
9842 
9843 		if (sp->cmd_queued) {
9844 			NDBG23(("mptsas_do_scsi_abort: queued sp=0x%p aborted",
9845 			    (void *)sp));
9846 			mptsas_waitq_delete(mpt, sp);
9847 			mptsas_set_pkt_reason(mpt, sp, CMD_ABORTED,
9848 			    STAT_ABORTED);
9849 			mptsas_doneq_add(mpt, sp);
9850 			rval = TRUE;
9851 			goto done;
9852 		}
9853 
9854 		/*
9855 		 * Have mpt firmware abort this command
9856 		 */
9857 
9858 		if (slots->m_slot[sp->cmd_slot] != NULL) {
9859 			rval = mptsas_ioc_task_management(mpt,
9860 			    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, target,
9861 			    lun, NULL, 0, 0);
9862 
9863 			/*
9864 			 * The transport layer expects only TRUE and FALSE.
9865 			 * Therefore, if mptsas_ioc_task_management returns
9866 			 * FAILED we will return FALSE.
9867 			 */
9868 			if (rval == FAILED)
9869 				rval = FALSE;
9870 			goto done;
9871 		}
9872 	}
9873 
9874 	/*
9875 	 * If pkt is NULL then abort task set
9876 	 */
9877 	rval = mptsas_ioc_task_management(mpt,
9878 	    MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET, target, lun, NULL, 0, 0);
9879 
9880 	/*
9881 	 * The transport layer expects only TRUE and FALSE.
9882 	 * Therefore, if mptsas_ioc_task_management returns
9883 	 * FAILED we will return FALSE.
9884 	 */
9885 	if (rval == FAILED)
9886 		rval = FALSE;
9887 
9888 #ifdef MPTSAS_TEST
9889 	if (rval && mptsas_test_stop) {
9890 		debug_enter("mptsas_do_scsi_abort");
9891 	}
9892 #endif
9893 
9894 done:
9895 	mptsas_doneq_empty(mpt);
9896 	return (rval);
9897 }
9898 
9899 /*
9900  * capability handling:
9901  * (*tran_getcap).  Get the capability named, and return its value.
9902  */
9903 static int
9904 mptsas_scsi_getcap(struct scsi_address *ap, char *cap, int tgtonly)
9905 {
9906 	mptsas_t	*mpt = ADDR2MPT(ap);
9907 	int		ckey;
9908 	int		rval = FALSE;
9909 
9910 	NDBG24(("mptsas_scsi_getcap: target=%d, cap=%s tgtonly=%x",
9911 	    ap->a_target, cap, tgtonly));
9912 
9913 	mutex_enter(&mpt->m_mutex);
9914 
9915 	if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9916 		mutex_exit(&mpt->m_mutex);
9917 		return (UNDEFINED);
9918 	}
9919 
9920 	switch (ckey) {
9921 	case SCSI_CAP_DMA_MAX:
9922 		rval = (int)mpt->m_msg_dma_attr.dma_attr_maxxfer;
9923 		break;
9924 	case SCSI_CAP_ARQ:
9925 		rval = TRUE;
9926 		break;
9927 	case SCSI_CAP_MSG_OUT:
9928 	case SCSI_CAP_PARITY:
9929 	case SCSI_CAP_UNTAGGED_QING:
9930 		rval = TRUE;
9931 		break;
9932 	case SCSI_CAP_TAGGED_QING:
9933 		rval = TRUE;
9934 		break;
9935 	case SCSI_CAP_RESET_NOTIFICATION:
9936 		rval = TRUE;
9937 		break;
9938 	case SCSI_CAP_LINKED_CMDS:
9939 		rval = FALSE;
9940 		break;
9941 	case SCSI_CAP_QFULL_RETRIES:
9942 		rval = ((mptsas_tgt_private_t *)(ap->a_hba_tran->
9943 		    tran_tgt_private))->t_private->m_qfull_retries;
9944 		break;
9945 	case SCSI_CAP_QFULL_RETRY_INTERVAL:
9946 		rval = drv_hztousec(((mptsas_tgt_private_t *)
9947 		    (ap->a_hba_tran->tran_tgt_private))->
9948 		    t_private->m_qfull_retry_interval) / 1000;
9949 		break;
9950 	case SCSI_CAP_CDB_LEN:
9951 		rval = CDB_GROUP4;
9952 		break;
9953 	case SCSI_CAP_INTERCONNECT_TYPE:
9954 		rval = INTERCONNECT_SAS;
9955 		break;
9956 	case SCSI_CAP_TRAN_LAYER_RETRIES:
9957 		if (mpt->m_ioc_capabilities &
9958 		    MPI2_IOCFACTS_CAPABILITY_TLR)
9959 			rval = TRUE;
9960 		else
9961 			rval = FALSE;
9962 		break;
9963 	default:
9964 		rval = UNDEFINED;
9965 		break;
9966 	}
9967 
9968 	NDBG24(("mptsas_scsi_getcap: %s, rval=%x", cap, rval));
9969 
9970 	mutex_exit(&mpt->m_mutex);
9971 	return (rval);
9972 }
9973 
9974 /*
9975  * (*tran_setcap).  Set the capability named to the value given.
9976  */
9977 static int
9978 mptsas_scsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
9979 {
9980 	mptsas_t	*mpt = ADDR2MPT(ap);
9981 	int		ckey;
9982 	int		rval = FALSE;
9983 
9984 	NDBG24(("mptsas_scsi_setcap: target=%d, cap=%s value=%x tgtonly=%x",
9985 	    ap->a_target, cap, value, tgtonly));
9986 
9987 	if (!tgtonly) {
9988 		return (rval);
9989 	}
9990 
9991 	mutex_enter(&mpt->m_mutex);
9992 
9993 	if ((mptsas_scsi_capchk(cap, tgtonly, &ckey)) != TRUE) {
9994 		mutex_exit(&mpt->m_mutex);
9995 		return (UNDEFINED);
9996 	}
9997 
9998 	switch (ckey) {
9999 	case SCSI_CAP_DMA_MAX:
10000 	case SCSI_CAP_MSG_OUT:
10001 	case SCSI_CAP_PARITY:
10002 	case SCSI_CAP_INITIATOR_ID:
10003 	case SCSI_CAP_LINKED_CMDS:
10004 	case SCSI_CAP_UNTAGGED_QING:
10005 	case SCSI_CAP_RESET_NOTIFICATION:
10006 		/*
10007 		 * None of these are settable via
10008 		 * the capability interface.
10009 		 */
10010 		break;
10011 	case SCSI_CAP_ARQ:
10012 		/*
10013 		 * We cannot turn off arq so return false if asked to
10014 		 */
10015 		if (value) {
10016 			rval = TRUE;
10017 		} else {
10018 			rval = FALSE;
10019 		}
10020 		break;
10021 	case SCSI_CAP_TAGGED_QING:
10022 		mptsas_set_throttle(mpt, ((mptsas_tgt_private_t *)
10023 		    (ap->a_hba_tran->tran_tgt_private))->t_private,
10024 		    MAX_THROTTLE);
10025 		rval = TRUE;
10026 		break;
10027 	case SCSI_CAP_QFULL_RETRIES:
10028 		((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
10029 		    t_private->m_qfull_retries = (uchar_t)value;
10030 		rval = TRUE;
10031 		break;
10032 	case SCSI_CAP_QFULL_RETRY_INTERVAL:
10033 		((mptsas_tgt_private_t *)(ap->a_hba_tran->tran_tgt_private))->
10034 		    t_private->m_qfull_retry_interval =
10035 		    drv_usectohz(value * 1000);
10036 		rval = TRUE;
10037 		break;
10038 	default:
10039 		rval = UNDEFINED;
10040 		break;
10041 	}
10042 	mutex_exit(&mpt->m_mutex);
10043 	return (rval);
10044 }
10045 
10046 /*
10047  * Utility routine for mptsas_ifsetcap/ifgetcap
10048  */
10049 /*ARGSUSED*/
10050 static int
10051 mptsas_scsi_capchk(char *cap, int tgtonly, int *cidxp)
10052 {
10053 	NDBG24(("mptsas_scsi_capchk: cap=%s", cap));
10054 
10055 	if (!cap)
10056 		return (FALSE);
10057 
10058 	*cidxp = scsi_hba_lookup_capstr(cap);
10059 	return (TRUE);
10060 }
10061 
10062 static int
10063 mptsas_alloc_active_slots(mptsas_t *mpt, int flag)
10064 {
10065 	mptsas_slots_t	*old_active = mpt->m_active;
10066 	mptsas_slots_t	*new_active;
10067 	size_t		size;
10068 
10069 	/*
10070 	 * if there are active commands, then we cannot
10071 	 * change size of active slots array.
10072 	 */
10073 	ASSERT(mpt->m_ncmds == 0);
10074 
10075 	size = MPTSAS_SLOTS_SIZE(mpt);
10076 	new_active = kmem_zalloc(size, flag);
10077 	if (new_active == NULL) {
10078 		NDBG1(("new active alloc failed"));
10079 		return (-1);
10080 	}
10081 	/*
10082 	 * Since SMID 0 is reserved and the TM slot is reserved, the
10083 	 * number of slots that can be used at any one time is
10084 	 * m_max_requests - 2.
10085 	 */
10086 	new_active->m_n_normal = (mpt->m_max_requests - 2);
10087 	new_active->m_size = size;
10088 	new_active->m_rotor = 1;
10089 	if (old_active)
10090 		mptsas_free_active_slots(mpt);
10091 	mpt->m_active = new_active;
10092 
10093 	return (0);
10094 }
10095 
10096 static void
10097 mptsas_free_active_slots(mptsas_t *mpt)
10098 {
10099 	mptsas_slots_t	*active = mpt->m_active;
10100 	size_t		size;
10101 
10102 	if (active == NULL)
10103 		return;
10104 	size = active->m_size;
10105 	kmem_free(active, size);
10106 	mpt->m_active = NULL;
10107 }
10108 
10109 /*
10110  * Error logging, printing, and debug print routines.
10111  */
10112 static char *mptsas_label = "mpt_sas";
10113 
10114 /*PRINTFLIKE3*/
10115 void
10116 mptsas_log(mptsas_t *mpt, int level, char *fmt, ...)
10117 {
10118 	dev_info_t	*dev;
10119 	va_list		ap;
10120 
10121 	if (mpt) {
10122 		dev = mpt->m_dip;
10123 	} else {
10124 		dev = 0;
10125 	}
10126 
10127 	mutex_enter(&mptsas_log_mutex);
10128 
10129 	va_start(ap, fmt);
10130 	(void) vsprintf(mptsas_log_buf, fmt, ap);
10131 	va_end(ap);
10132 
10133 	if (level == CE_CONT) {
10134 		scsi_log(dev, mptsas_label, level, "%s\n", mptsas_log_buf);
10135 	} else {
10136 		scsi_log(dev, mptsas_label, level, "%s", mptsas_log_buf);
10137 	}
10138 
10139 	mutex_exit(&mptsas_log_mutex);
10140 }
10141 
10142 #ifdef MPTSAS_DEBUG
10143 /*
10144  * Use a circular buffer to log messages to private memory.
10145  * Increment idx atomically to minimize the risk of missing lines.
10146  * It's fast and does not hold up the proceedings too much.
10147  */
10148 static const size_t mptsas_dbglog_linecnt = MPTSAS_DBGLOG_LINECNT;
10149 static const size_t mptsas_dbglog_linelen = MPTSAS_DBGLOG_LINELEN;
10150 static char mptsas_dbglog_bufs[MPTSAS_DBGLOG_LINECNT][MPTSAS_DBGLOG_LINELEN];
10151 static uint32_t mptsas_dbglog_idx = 0;
10152 
10153 /*PRINTFLIKE1*/
10154 void
10155 mptsas_debug_log(char *fmt, ...)
10156 {
10157 	va_list		ap;
10158 	uint32_t	idx;
10159 
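	/*
	 * Masking the atomically incremented index wraps it around the
	 * circular buffer without a modulo; this assumes
	 * MPTSAS_DBGLOG_LINECNT is a power of two.
	 */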
10160 	idx = atomic_inc_32_nv(&mptsas_dbglog_idx) &
10161 	    (mptsas_dbglog_linecnt - 1);
10162 
10163 	va_start(ap, fmt);
10164 	(void) vsnprintf(mptsas_dbglog_bufs[idx],
10165 	    mptsas_dbglog_linelen, fmt, ap);
10166 	va_end(ap);
10167 }
10168 
10169 /*PRINTFLIKE1*/
10170 void
10171 mptsas_printf(char *fmt, ...)
10172 {
10173 	dev_info_t	*dev = 0;
10174 	va_list		ap;
10175 
10176 	mutex_enter(&mptsas_log_mutex);
10177 
10178 	va_start(ap, fmt);
10179 	(void) vsprintf(mptsas_log_buf, fmt, ap);
10180 	va_end(ap);
10181 
10182 #ifdef PROM_PRINTF
10183 	prom_printf("%s:\t%s\n", mptsas_label, mptsas_log_buf);
10184 #else
10185 	scsi_log(dev, mptsas_label, CE_CONT, "!%s\n", mptsas_log_buf);
10186 #endif
10187 	mutex_exit(&mptsas_log_mutex);
10188 }
10189 #endif
10190 
10191 /*
10192  * timeout handling
10193  */
10194 static void
10195 mptsas_watch(void *arg)
10196 {
10197 #ifndef __lock_lint
10198 	_NOTE(ARGUNUSED(arg))
10199 #endif
10200 
10201 	mptsas_t	*mpt;
10202 	uint32_t	doorbell;
10203 
10204 	NDBG30(("mptsas_watch"));
10205 
10206 	rw_enter(&mptsas_global_rwlock, RW_READER);
10207 	for (mpt = mptsas_head; mpt != (mptsas_t *)NULL; mpt = mpt->m_next) {
10208 
10209 		mutex_enter(&mpt->m_mutex);
10210 
10211 		/* Skip device if not powered on */
10212 		if (mpt->m_options & MPTSAS_OPT_PM) {
10213 			if (mpt->m_power_level == PM_LEVEL_D0) {
10214 				(void) pm_busy_component(mpt->m_dip, 0);
10215 				mpt->m_busy = 1;
10216 			} else {
10217 				mutex_exit(&mpt->m_mutex);
10218 				continue;
10219 			}
10220 		}
10221 
10222 		/*
10223 		 * Check if controller is in a FAULT state. If so, reset it.
10224 		 */
10225 		doorbell = mptsas_hirrd(mpt, &mpt->m_reg->Doorbell);
10226 		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
10227 			doorbell &= MPI2_DOORBELL_DATA_MASK;
10228 			mptsas_log(mpt, CE_WARN, "MPT Firmware Fault, "
10229 			    "code: %04x", doorbell);
10230 			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
10231 			if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
10232 				mptsas_log(mpt, CE_WARN, "Reset failed"
10233 				    "after fault was detected");
10234 			}
10235 		}
10236 
10237 		/*
10238 		 * For now, always call mptsas_watchsubr.
10239 		 */
10240 		mptsas_watchsubr(mpt);
10241 
10242 		if (mpt->m_options & MPTSAS_OPT_PM) {
10243 			mpt->m_busy = 0;
10244 			(void) pm_idle_component(mpt->m_dip, 0);
10245 		}
10246 
10247 		mutex_exit(&mpt->m_mutex);
10248 	}
10249 	rw_exit(&mptsas_global_rwlock);
10250 
10251 	mutex_enter(&mptsas_global_mutex);
10252 	if (mptsas_timeouts_enabled)
10253 		mptsas_timeout_id = timeout(mptsas_watch, NULL, mptsas_tick);
10254 	mutex_exit(&mptsas_global_mutex);
10255 }
10256 
10257 static void
10258 mptsas_watchsubr_tgt(mptsas_t *mpt, mptsas_target_t *ptgt, hrtime_t timestamp)
10259 {
10260 	mptsas_cmd_t	*cmd;
10261 
10262 	/*
10263 	 * If we were draining due to a qfull condition,
10264 	 * go back to full throttle.
10265 	 */
10266 	if ((ptgt->m_t_throttle < MAX_THROTTLE) &&
10267 	    (ptgt->m_t_throttle > HOLD_THROTTLE) &&
10268 	    (ptgt->m_t_ncmds < ptgt->m_t_throttle)) {
10269 		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
10270 		mptsas_restart_hba(mpt);
10271 	}
10272 
10273 	cmd = TAILQ_LAST(&ptgt->m_active_cmdq, mptsas_active_cmdq);
10274 	if (cmd == NULL)
10275 		return;
10276 
10277 	if (cmd->cmd_active_expiration <= timestamp) {
10278 		/*
10279 		 * Earliest command timeout expired. Drain throttle.
10280 		 */
10281 		mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
10282 
10283 		/*
10284 		 * Check for remaining commands.
10285 		 */
10286 		cmd = TAILQ_FIRST(&ptgt->m_active_cmdq);
10287 		if (cmd->cmd_active_expiration > timestamp) {
10288 			/*
10289 			 * Wait for remaining commands to complete or
10290 			 * time out.
10291 			 */
10292 			NDBG23(("command timed out, pending drain"));
10293 			return;
10294 		}
10295 
10296 		/*
10297 		 * All command timeouts expired.
10298 		 */
10299 		mptsas_log(mpt, CE_NOTE, "Timeout of %d seconds "
10300 		    "expired with %d commands on target %d lun %d.",
10301 		    cmd->cmd_pkt->pkt_time, ptgt->m_t_ncmds,
10302 		    ptgt->m_devhdl, Lun(cmd));
10303 
10304 		mptsas_cmd_timeout(mpt, ptgt);
10305 	} else if (cmd->cmd_active_expiration <=
10306 	    timestamp + (hrtime_t)mptsas_scsi_watchdog_tick * NANOSEC) {
10307 		NDBG23(("pending timeout"));
10308 		mptsas_set_throttle(mpt, ptgt, DRAIN_THROTTLE);
10309 	}
10310 }
10311 
10312 static void
10313 mptsas_watchsubr(mptsas_t *mpt)
10314 {
10315 	int		i;
10316 	mptsas_cmd_t	*cmd;
10317 	mptsas_target_t	*ptgt = NULL;
10318 	hrtime_t	timestamp = gethrtime();
10319 
10320 	ASSERT(MUTEX_HELD(&mpt->m_mutex));
10321 
10322 	NDBG30(("mptsas_watchsubr: mpt=0x%p", (void *)mpt));
10323 
10324 #ifdef MPTSAS_TEST
10325 	if (mptsas_enable_untagged) {
10326 		mptsas_test_untagged++;
10327 	}
10328 #endif
10329 
10330 	/*
10331 	 * Check for commands stuck in active slot
10332 	 * Account for TM requests, which use the last SMID.
10333 	 */
10334 	for (i = 0; i <= mpt->m_active->m_n_normal; i++) {
10335 		if ((cmd = mpt->m_active->m_slot[i]) != NULL) {
10336 			if (cmd->cmd_active_expiration <= timestamp) {
10337 				if ((cmd->cmd_flags & CFLAG_CMDIOC) == 0) {
10338 					/*
10339 					 * There seems to be a command stuck
10340 					 * in the active slot.  Drain throttle.
10341 					 */
10342 					mptsas_set_throttle(mpt,
10343 					    cmd->cmd_tgt_addr,
10344 					    DRAIN_THROTTLE);
10345 				} else if (cmd->cmd_flags &
10346 				    (CFLAG_PASSTHRU | CFLAG_CONFIG |
10347 				    CFLAG_FW_DIAG)) {
10348 					/*
10349 					 * passthrough command timeout
10350 					 */
10351 					cmd->cmd_flags |= (CFLAG_FINISHED |
10352 					    CFLAG_TIMEOUT);
10353 					cv_broadcast(&mpt->m_passthru_cv);
10354 					cv_broadcast(&mpt->m_config_cv);
10355 					cv_broadcast(&mpt->m_fw_diag_cv);
10356 				}
10357 			}
10358 		}
10359 	}
10360 
10361 	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
10362 	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
10363 		mptsas_watchsubr_tgt(mpt, ptgt, timestamp);
10364 	}
10365 
10366 	for (ptgt = refhash_first(mpt->m_tmp_targets); ptgt != NULL;
10367 	    ptgt = refhash_next(mpt->m_tmp_targets, ptgt)) {
10368 		mptsas_watchsubr_tgt(mpt, ptgt, timestamp);
10369 	}
10370 }
10371 
10372 /*
10373  * timeout recovery
10374  */
10375 static void
10376 mptsas_cmd_timeout(mptsas_t *mpt, mptsas_target_t *ptgt)
10377 {
10378 	uint16_t	devhdl;
10379 	uint64_t	sas_wwn;
10380 	uint8_t		phy;
10381 	char		wwn_str[MPTSAS_WWN_STRLEN];
10382 
10383 	devhdl = ptgt->m_devhdl;
10384 	sas_wwn = ptgt->m_addr.mta_wwn;
10385 	phy = ptgt->m_phynum;
10386 	if (sas_wwn == 0) {
10387 		(void) sprintf(wwn_str, "p%x", phy);
10388 	} else {
10389 		(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
10390 	}
10391 
10392 	NDBG29(("mptsas_cmd_timeout: target=%d", devhdl));
10393 	mptsas_log(mpt, CE_WARN, "Disconnected command timeout for "
10394 	    "target %d %s, enclosure %u", devhdl, wwn_str,
10395 	    ptgt->m_enclosure);
10396 
10397 	/*
10398 	 * Abort all outstanding commands on the device.
10399 	 */
10400 	NDBG29(("mptsas_cmd_timeout: device reset"));
10401 	if (mptsas_do_scsi_reset(mpt, devhdl) != TRUE) {
10402 		mptsas_log(mpt, CE_WARN, "Target %d reset for command timeout "
10403 		    "recovery failed!", devhdl);
10404 	}
10405 }
10406 
10407 /*
10408  * Device / Hotplug control
10409  */
10410 static int
10411 mptsas_scsi_quiesce(dev_info_t *dip)
10412 {
10413 	mptsas_t	*mpt;
10414 	scsi_hba_tran_t	*tran;
10415 
10416 	tran = ddi_get_driver_private(dip);
10417 	if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
10418 		return (-1);
10419 
10420 	return (mptsas_quiesce_bus(mpt));
10421 }
10422 
10423 static int
10424 mptsas_scsi_unquiesce(dev_info_t *dip)
10425 {
10426 	mptsas_t		*mpt;
10427 	scsi_hba_tran_t	*tran;
10428 
10429 	tran = ddi_get_driver_private(dip);
10430 	if (tran == NULL || (mpt = TRAN2MPT(tran)) == NULL)
10431 		return (-1);
10432 
10433 	return (mptsas_unquiesce_bus(mpt));
10434 }
10435 
10436 static int
10437 mptsas_quiesce_bus(mptsas_t *mpt)
10438 {
10439 	mptsas_target_t	*ptgt = NULL;
10440 
10441 	NDBG28(("mptsas_quiesce_bus"));
10442 	mutex_enter(&mpt->m_mutex);
10443 
10444 	/* Set all the throttles to zero */
10445 	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
10446 	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
10447 		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
10448 	}
10449 
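	/*
	 * With every target held at HOLD_THROTTLE no new commands are
	 * started.  The checkdrain timeout below re-asserts the hold (a
	 * bus reset may have restored the throttles) and signals m_cv
	 * once m_ncmds drains to zero.
	 */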
10450 	/* If there are any outstanding commands in the queue */
10451 	if (mpt->m_ncmds) {
10452 		mpt->m_softstate |= MPTSAS_SS_DRAINING;
10453 		mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
10454 		    mpt, (MPTSAS_QUIESCE_TIMEOUT * drv_usectohz(1000000)));
10455 		if (cv_wait_sig(&mpt->m_cv, &mpt->m_mutex) == 0) {
10456 			/*
10457 			 * Quiesce has been interrupted
10458 			 */
10459 			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
10460 			for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
10461 			    ptgt = refhash_next(mpt->m_targets, ptgt)) {
10462 				mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
10463 			}
10464 			mptsas_restart_hba(mpt);
10465 			if (mpt->m_quiesce_timeid != 0) {
10466 				timeout_id_t tid = mpt->m_quiesce_timeid;
10467 				mpt->m_quiesce_timeid = 0;
10468 				mutex_exit(&mpt->m_mutex);
10469 				(void) untimeout(tid);
10470 				return (-1);
10471 			}
10472 			mutex_exit(&mpt->m_mutex);
10473 			return (-1);
10474 		} else {
10475 			/* Bus has been quiesced */
10476 			ASSERT(mpt->m_quiesce_timeid == 0);
10477 			mpt->m_softstate &= ~MPTSAS_SS_DRAINING;
10478 			mpt->m_softstate |= MPTSAS_SS_QUIESCED;
10479 			mutex_exit(&mpt->m_mutex);
10480 			return (0);
10481 		}
10482 	}
10483 	/* Bus was not busy - QUIESCED */
10484 	mutex_exit(&mpt->m_mutex);
10485 
10486 	return (0);
10487 }
10488 
10489 static int
10490 mptsas_unquiesce_bus(mptsas_t *mpt)
10491 {
10492 	mptsas_target_t	*ptgt = NULL;
10493 
10494 	NDBG28(("mptsas_unquiesce_bus"));
10495 	mutex_enter(&mpt->m_mutex);
10496 	mpt->m_softstate &= ~MPTSAS_SS_QUIESCED;
10497 	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
10498 	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
10499 		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
10500 	}
10501 	mptsas_restart_hba(mpt);
10502 	mutex_exit(&mpt->m_mutex);
10503 	return (0);
10504 }
10505 
10506 static void
10507 mptsas_ncmds_checkdrain(void *arg)
10508 {
10509 	mptsas_t	*mpt = arg;
10510 	mptsas_target_t	*ptgt = NULL;
10511 
10512 	mutex_enter(&mpt->m_mutex);
10513 	if (mpt->m_softstate & MPTSAS_SS_DRAINING) {
10514 		mpt->m_quiesce_timeid = 0;
10515 		if (mpt->m_ncmds == 0) {
10516 			/* Command queue has been drained */
10517 			cv_signal(&mpt->m_cv);
10518 		} else {
10519 			/*
10520 			 * The throttle may have been reset because
10521 			 * of a SCSI bus reset
10522 			 */
10523 			for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
10524 			    ptgt = refhash_next(mpt->m_targets, ptgt)) {
10525 				mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
10526 			}
10527 
10528 			mpt->m_quiesce_timeid = timeout(mptsas_ncmds_checkdrain,
10529 			    mpt, (MPTSAS_QUIESCE_TIMEOUT *
10530 			    drv_usectohz(1000000)));
10531 		}
10532 	}
10533 	mutex_exit(&mpt->m_mutex);
10534 }
10535 
10536 /*ARGSUSED*/
10537 static void
10538 mptsas_dump_cmd(mptsas_t *mpt, mptsas_cmd_t *cmd)
10539 {
10540 	int	i;
10541 	uint8_t	*cp = (uchar_t *)cmd->cmd_pkt->pkt_cdbp;
10542 	char	buf[128];
10543 
10544 	buf[0] = '\0';
10545 	NDBG25(("?Cmd (0x%p) dump for Target %d Lun %d:\n", (void *)cmd,
10546 	    Tgt(cmd), Lun(cmd)));
10547 	(void) sprintf(&buf[0], "\tcdb=[");
10548 	for (i = 0; i < (int)cmd->cmd_cdblen; i++) {
10549 		(void) sprintf(&buf[strlen(buf)], " 0x%x", *cp++);
10550 	}
10551 	(void) sprintf(&buf[strlen(buf)], " ]");
10552 	NDBG25(("?%s\n", buf));
10553 	NDBG25(("?pkt_flags=0x%x pkt_statistics=0x%x pkt_state=0x%x\n",
10554 	    cmd->cmd_pkt->pkt_flags, cmd->cmd_pkt->pkt_statistics,
10555 	    cmd->cmd_pkt->pkt_state));
10556 	NDBG25(("?pkt_scbp=0x%x cmd_flags=0x%x\n", cmd->cmd_pkt->pkt_scbp ?
10557 	    *(cmd->cmd_pkt->pkt_scbp) : 0, cmd->cmd_flags));
10558 }
10559 
10560 static void
10561 mptsas_passthru_sge(ddi_acc_handle_t acc_hdl, mptsas_pt_request_t *pt,
10562     pMpi2SGESimple64_t sgep)
10563 {
10564 	uint32_t		sge_flags;
10565 	uint32_t		data_size, dataout_size;
10566 	ddi_dma_cookie_t	data_cookie;
10567 	ddi_dma_cookie_t	dataout_cookie;
10568 
10569 	data_size = pt->data_size;
10570 	dataout_size = pt->dataout_size;
10571 	data_cookie = pt->data_cookie;
10572 	dataout_cookie = pt->dataout_cookie;
10573 
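	/*
	 * Build one simple 64-bit SGE for the optional data-out (write)
	 * buffer first, then the final SGE for the data (read) buffer,
	 * which carries the last-element/end-of-list flags.
	 */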
10574 	if (dataout_size) {
10575 		sge_flags = dataout_size |
10576 		    ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
10577 		    MPI2_SGE_FLAGS_END_OF_BUFFER |
10578 		    MPI2_SGE_FLAGS_HOST_TO_IOC |
10579 		    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
10580 		    MPI2_SGE_FLAGS_SHIFT);
10581 		ddi_put32(acc_hdl, &sgep->FlagsLength, sge_flags);
10582 		ddi_put32(acc_hdl, &sgep->Address.Low,
10583 		    (uint32_t)(dataout_cookie.dmac_laddress &
10584 		    0xffffffffull));
10585 		ddi_put32(acc_hdl, &sgep->Address.High,
10586 		    (uint32_t)(dataout_cookie.dmac_laddress
10587 		    >> 32));
10588 		sgep++;
10589 	}
10590 	sge_flags = data_size;
10591 	sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
10592 	    MPI2_SGE_FLAGS_LAST_ELEMENT |
10593 	    MPI2_SGE_FLAGS_END_OF_BUFFER |
10594 	    MPI2_SGE_FLAGS_END_OF_LIST |
10595 	    MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
10596 	    MPI2_SGE_FLAGS_SHIFT);
10597 	if (pt->direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
10598 		sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
10599 		    MPI2_SGE_FLAGS_SHIFT);
10600 	} else {
10601 		sge_flags |= ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
10602 		    MPI2_SGE_FLAGS_SHIFT);
10603 	}
10604 	ddi_put32(acc_hdl, &sgep->FlagsLength,
10605 	    sge_flags);
10606 	ddi_put32(acc_hdl, &sgep->Address.Low,
10607 	    (uint32_t)(data_cookie.dmac_laddress &
10608 	    0xffffffffull));
10609 	ddi_put32(acc_hdl, &sgep->Address.High,
10610 	    (uint32_t)(data_cookie.dmac_laddress >> 32));
10611 }
10612 
10613 static void
10614 mptsas_passthru_ieee_sge(ddi_acc_handle_t acc_hdl, mptsas_pt_request_t *pt,
10615     pMpi2IeeeSgeSimple64_t ieeesgep)
10616 {
10617 	uint8_t			sge_flags;
10618 	uint32_t		data_size, dataout_size;
10619 	ddi_dma_cookie_t	data_cookie;
10620 	ddi_dma_cookie_t	dataout_cookie;
10621 
10622 	data_size = pt->data_size;
10623 	dataout_size = pt->dataout_size;
10624 	data_cookie = pt->data_cookie;
10625 	dataout_cookie = pt->dataout_cookie;
10626 
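	/*
	 * MPI 2.5 IEEE simple SGEs use a separate 32-bit Length and 8-bit
	 * Flags field instead of the packed FlagsLength above; otherwise
	 * the layout mirrors mptsas_passthru_sge(): an optional data-out
	 * element followed by the final data element with END_OF_LIST set.
	 */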
10627 	sge_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
10628 	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
10629 	if (dataout_size) {
10630 		ddi_put32(acc_hdl, &ieeesgep->Length, dataout_size);
10631 		ddi_put32(acc_hdl, &ieeesgep->Address.Low,
10632 		    (uint32_t)(dataout_cookie.dmac_laddress &
10633 		    0xffffffffull));
10634 		ddi_put32(acc_hdl, &ieeesgep->Address.High,
10635 		    (uint32_t)(dataout_cookie.dmac_laddress >> 32));
10636 		ddi_put8(acc_hdl, &ieeesgep->Flags, sge_flags);
10637 		ieeesgep++;
10638 	}
10639 	sge_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
10640 	ddi_put32(acc_hdl, &ieeesgep->Length, data_size);
10641 	ddi_put32(acc_hdl, &ieeesgep->Address.Low,
10642 	    (uint32_t)(data_cookie.dmac_laddress & 0xffffffffull));
10643 	ddi_put32(acc_hdl, &ieeesgep->Address.High,
10644 	    (uint32_t)(data_cookie.dmac_laddress >> 32));
10645 	ddi_put8(acc_hdl, &ieeesgep->Flags, sge_flags);
10646 }
10647 
10648 static void
10649 mptsas_start_passthru(mptsas_t *mpt, mptsas_cmd_t *cmd)
10650 {
10651 	caddr_t			memp;
10652 	pMPI2RequestHeader_t	request_hdrp;
10653 	struct scsi_pkt		*pkt = cmd->cmd_pkt;
10654 	mptsas_pt_request_t	*pt = pkt->pkt_ha_private;
10655 	uint32_t		request_size;
10656 	uint32_t		i;
10657 	uint64_t		request_desc = 0;
10658 	uint8_t			desc_type;
10659 	uint16_t		SMID;
10660 	uint8_t			*request, function;
10661 	ddi_dma_handle_t	dma_hdl = mpt->m_dma_req_frame_hdl;
10662 	ddi_acc_handle_t	acc_hdl = mpt->m_acc_req_frame_hdl;
10663 
10664 	desc_type = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
10665 
10666 	request = pt->request;
10667 	request_size = pt->request_size;
10668 
10669 	SMID = cmd->cmd_slot;
10670 
10671 	/*
10672 	 * Store the passthrough message in the memory location
10673 	 * corresponding to our slot number.
10674 	 */
10675 	memp = mpt->m_req_frame + (mpt->m_req_frame_size * SMID);
10676 	request_hdrp = (pMPI2RequestHeader_t)memp;
10677 	bzero(memp, mpt->m_req_frame_size);
10678 
10679 	for (i = 0; i < request_size; i++) {
10680 		bcopy(request + i, memp + i, 1);
10681 	}
10682 
10683 	NDBG15(("mptsas_start_passthru: Func 0x%x, MsgFlags 0x%x, "
10684 	    "size=%d, in %d, out %d, SMID %d", request_hdrp->Function,
10685 	    request_hdrp->MsgFlags, request_size,
10686 	    pt->data_size, pt->dataout_size, SMID));
10687 
10688 	/*
10689 	 * Add an SGE, even if the length is zero.
10690 	 */
10691 	if (mpt->m_MPI25 && pt->simple == 0) {
10692 		mptsas_passthru_ieee_sge(acc_hdl, pt,
10693 		    (pMpi2IeeeSgeSimple64_t)
10694 		    ((uint8_t *)request_hdrp + pt->sgl_offset));
10695 	} else {
10696 		mptsas_passthru_sge(acc_hdl, pt,
10697 		    (pMpi2SGESimple64_t)
10698 		    ((uint8_t *)request_hdrp + pt->sgl_offset));
10699 	}
10700 
10701 	function = request_hdrp->Function;
10702 	if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
10703 	    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
10704 		pMpi2SCSIIORequest_t	scsi_io_req;
10705 		caddr_t			arsbuf;
10706 		uint8_t			ars_size;
10707 		uint32_t		ars_dmaaddrlow;
10708 
10709 		NDBG15(("mptsas_start_passthru: Is SCSI IO Req"));
10710 		scsi_io_req = (pMpi2SCSIIORequest_t)request_hdrp;
10711 
10712 		if (cmd->cmd_extrqslen != 0) {
10713 			/*
10714 			 * Mapping of the buffer was done in
10715 			 * mptsas_do_passthru().
10716 			 * Calculate the DMA address with the same offset.
10717 			 */
10718 			arsbuf = cmd->cmd_arq_buf;
10719 			ars_size = cmd->cmd_extrqslen;
10720 			ars_dmaaddrlow = (mpt->m_req_sense_dma_addr +
10721 			    ((uintptr_t)arsbuf - (uintptr_t)mpt->m_req_sense)) &
10722 			    0xffffffffu;
10723 		} else {
10724 			arsbuf = mpt->m_req_sense +
10725 			    (mpt->m_req_sense_size * (SMID-1));
10726 			cmd->cmd_arq_buf = arsbuf;
10727 			ars_size = mpt->m_req_sense_size;
10728 			ars_dmaaddrlow = (mpt->m_req_sense_dma_addr +
10729 			    (mpt->m_req_sense_size * (SMID-1))) &
10730 			    0xffffffffu;
10731 		}
10732 		bzero(arsbuf, ars_size);
10733 
10734 		ddi_put8(acc_hdl, &scsi_io_req->SenseBufferLength, ars_size);
10735 		ddi_put32(acc_hdl, &scsi_io_req->SenseBufferLowAddress,
10736 		    ars_dmaaddrlow);
10737 
10738 		/*
10739 		 * Put the SGEs for the data and data_out buffers at the end
10740 		 * of the scsi_io_request message header (64 bytes in total),
10741 		 * and set the SGLOffset0 value accordingly.
10742 		 */
10743 		ddi_put8(acc_hdl, &scsi_io_req->SGLOffset0,
10744 		    offsetof(MPI2_SCSI_IO_REQUEST, SGL) / 4);
10745 
10746 		/*
10747 		 * Setup descriptor info.  RAID passthrough must use the
10748 		 * default request descriptor which is already set, so if this
10749 		 * is a SCSI IO request, change the descriptor to SCSI IO.
10750 		 */
10751 		if (function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
10752 			desc_type = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
10753 			request_desc = ((uint64_t)ddi_get16(acc_hdl,
10754 			    &scsi_io_req->DevHandle) << 48);
10755 		}
10756 		(void) ddi_dma_sync(mpt->m_dma_req_sense_hdl, 0, 0,
10757 		    DDI_DMA_SYNC_FORDEV);
10758 	}
10759 
10760 	/*
10761 	 * We must wait till the message has been completed before
10762 	 * beginning the next message so we wait for this one to
10763 	 * finish.
10764 	 */
10765 	(void) ddi_dma_sync(dma_hdl, 0, 0, DDI_DMA_SYNC_FORDEV);
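	/*
	 * Build the request descriptor: the descriptor type occupies the
	 * low bits, the SMID goes in bits 16-31, and for SCSI IO requests
	 * the DevHandle was already placed in bits 48-63 above.
	 */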
10766 	request_desc |= (SMID << 16) + desc_type;
10767 	cmd->cmd_rfm = 0;
10768 	MPTSAS_START_CMD(mpt, request_desc);
10769 	if ((mptsas_check_dma_handle(dma_hdl) != DDI_SUCCESS) ||
10770 	    (mptsas_check_acc_handle(acc_hdl) != DDI_SUCCESS)) {
10771 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
10772 	}
10773 }
10774 
10775 typedef void (mptsas_pre_f)(mptsas_t *, mptsas_pt_request_t *);
10776 static mptsas_pre_f	mpi_pre_ioc_facts;
10777 static mptsas_pre_f	mpi_pre_port_facts;
10778 static mptsas_pre_f	mpi_pre_fw_download;
10779 static mptsas_pre_f	mpi_pre_fw_25_download;
10780 static mptsas_pre_f	mpi_pre_fw_upload;
10781 static mptsas_pre_f	mpi_pre_fw_25_upload;
10782 static mptsas_pre_f	mpi_pre_sata_passthrough;
10783 static mptsas_pre_f	mpi_pre_smp_passthrough;
10784 static mptsas_pre_f	mpi_pre_config;
10785 static mptsas_pre_f	mpi_pre_sas_io_unit_control;
10786 static mptsas_pre_f	mpi_pre_scsi_io_req;
10787 
10788 /*
10789  * Prepare the pt for a SAS2 FW_DOWNLOAD request.
10790  */
10791 static void
10792 mpi_pre_fw_download(mptsas_t *mpt, mptsas_pt_request_t *pt)
10793 {
10794 	pMpi2FWDownloadTCSGE_t tcsge;
10795 	pMpi2FWDownloadRequest req;
10796 
10797 	/*
10798 	 * If SAS3, call separate function.
10799 	 */
10800 	if (mpt->m_MPI25) {
10801 		mpi_pre_fw_25_download(mpt, pt);
10802 		return;
10803 	}
10804 
10805 	/*
10806 	 * User requests should come in with the Transaction
10807 	 * context element where the SGL will go. Putting the
10808 	 * SGL after that seems to work, though it isn't clear
10809 	 * why. Other drivers tend to create an extra SGL and
10810 	 * refer to the TCE through that.
10811 	 */
10812 	req = (pMpi2FWDownloadRequest)pt->request;
10813 	tcsge = (pMpi2FWDownloadTCSGE_t)&req->SGL;
10814 	if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10815 	    tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10816 		mptsas_log(mpt, CE_WARN, "FW Download tce invalid!");
10817 	}
10818 
10819 	pt->sgl_offset = offsetof(MPI2_FW_DOWNLOAD_REQUEST, SGL) +
10820 	    sizeof (*tcsge);
10821 	if (pt->request_size != pt->sgl_offset) {
10822 		NDBG15(("mpi_pre_fw_download(): Incorrect req size, "
10823 		    "0x%x, should be 0x%x, dataoutsz 0x%x",
10824 		    (int)pt->request_size, (int)pt->sgl_offset,
10825 		    (int)pt->dataout_size));
10826 	}
10827 	if (pt->data_size < sizeof (MPI2_FW_DOWNLOAD_REPLY)) {
10828 		NDBG15(("mpi_pre_fw_download(): Incorrect rep size, "
10829 		    "0x%x, should be 0x%x", pt->data_size,
10830 		    (int)sizeof (MPI2_FW_DOWNLOAD_REPLY)));
10831 	}
10832 }
10833 
10834 /*
10835  * Prepare the pt for a SAS3 FW_DOWNLOAD request.
10836  */
10837 static void
10838 mpi_pre_fw_25_download(mptsas_t *mpt, mptsas_pt_request_t *pt)
10839 {
10840 	pMpi2FWDownloadTCSGE_t tcsge;
10841 	pMpi2FWDownloadRequest req2;
10842 	pMpi25FWDownloadRequest req25;
10843 
10844 	/*
10845 	 * User requests should come in with the Transaction
10846 	 * context element where the SGL will go. The newer firmware
10847 	 * doesn't use a TCE and has space in the main request for
10848 	 * this information, so move it to the right place.
10849 	 */
10850 	req2 = (pMpi2FWDownloadRequest)pt->request;
10851 	req25 = (pMpi25FWDownloadRequest)pt->request;
10852 	tcsge = (pMpi2FWDownloadTCSGE_t)&req2->SGL;
10853 	if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10854 	    tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10855 		mptsas_log(mpt, CE_WARN, "FW Download tce invalid!");
10856 	}
10857 	req25->ImageOffset = tcsge->ImageOffset;
10858 	req25->ImageSize = tcsge->ImageSize;
10859 
10860 	pt->sgl_offset = offsetof(MPI25_FW_DOWNLOAD_REQUEST, SGL);
10861 	if (pt->request_size != pt->sgl_offset) {
10862 		NDBG15(("mpi_pre_fw_25_download(): Incorrect req size, "
10863 		    "0x%x, should be 0x%x, dataoutsz 0x%x",
10864 		    pt->request_size, pt->sgl_offset,
10865 		    pt->dataout_size));
10866 	}
10867 	if (pt->data_size < sizeof (MPI2_FW_DOWNLOAD_REPLY)) {
10868 		NDBG15(("mpi_pre_fw_25_download(): Incorrect rep size, "
10869 		    "0x%x, should be 0x%x", pt->data_size,
10870 		    (int)sizeof (MPI2_FW_DOWNLOAD_REPLY)));
10871 	}
10872 }
10873 
10874 /*
10875  * Prepare the pt for a SAS2 FW_UPLOAD request.
10876  */
10877 static void
10878 mpi_pre_fw_upload(mptsas_t *mpt, mptsas_pt_request_t *pt)
10879 {
10880 	pMpi2FWUploadTCSGE_t tcsge;
10881 	pMpi2FWUploadRequest_t req;
10882 
10883 	/*
10884 	 * If SAS3, call separate function.
10885 	 */
10886 	if (mpt->m_MPI25) {
10887 		mpi_pre_fw_25_upload(mpt, pt);
10888 		return;
10889 	}
10890 
10891 	/*
10892 	 * User requests should come in with the Transaction
10893 	 * context element where the SGL will go. Putting the
10894 	 * SGL after that seems to work, though it isn't clear
10895 	 * why. Other drivers tend to create an extra SGL and
10896 	 * refer to the TCE through that.
10897 	 */
10898 	req = (pMpi2FWUploadRequest_t)pt->request;
10899 	tcsge = (pMpi2FWUploadTCSGE_t)&req->SGL;
10900 	if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10901 	    tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10902 		mptsas_log(mpt, CE_WARN, "FW Upload tce invalid!");
10903 	}
10904 
10905 	pt->sgl_offset = offsetof(MPI2_FW_UPLOAD_REQUEST, SGL) +
10906 	    sizeof (*tcsge);
10907 	if (pt->request_size != pt->sgl_offset) {
10908 		NDBG15(("mpi_pre_fw_upload(): Incorrect req size, "
10909 		    "0x%x, should be 0x%x, dataoutsz 0x%x",
10910 		    pt->request_size, pt->sgl_offset,
10911 		    pt->dataout_size));
10912 	}
10913 	if (pt->data_size < sizeof (MPI2_FW_UPLOAD_REPLY)) {
10914 		NDBG15(("mpi_pre_fw_upload(): Incorrect rep size, "
10915 		    "0x%x, should be 0x%x", pt->data_size,
10916 		    (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
10917 	}
10918 }
10919 
10920 /*
10921  * Prepare the pt for a SAS3 FW_UPLOAD request.
10922  */
10923 static void
10924 mpi_pre_fw_25_upload(mptsas_t *mpt, mptsas_pt_request_t *pt)
10925 {
10926 	pMpi2FWUploadTCSGE_t tcsge;
10927 	pMpi2FWUploadRequest_t req2;
10928 	pMpi25FWUploadRequest_t req25;
10929 
10930 	/*
10931 	 * User requests should come in with the Transaction
10932 	 * context element where the SGL will go. The newer firmware
10933 	 * doesn't use a TCE and has space in the main request for
10934 	 * this information, so move it to the right place.
10935 	 */
10936 	req2 = (pMpi2FWUploadRequest_t)pt->request;
10937 	req25 = (pMpi25FWUploadRequest_t)pt->request;
10938 	tcsge = (pMpi2FWUploadTCSGE_t)&req2->SGL;
10939 	if (tcsge->ContextSize != 0 || tcsge->DetailsLength != 12 ||
10940 	    tcsge->Flags != MPI2_SGE_FLAGS_TRANSACTION_ELEMENT) {
10941 		mptsas_log(mpt, CE_WARN, "FW Upload tce invalid!");
10942 	}
10943 	req25->ImageOffset = tcsge->ImageOffset;
10944 	req25->ImageSize = tcsge->ImageSize;
10945 
10946 	pt->sgl_offset = offsetof(MPI25_FW_UPLOAD_REQUEST, SGL);
10947 	if (pt->request_size != pt->sgl_offset) {
10948 		NDBG15(("mpi_pre_fw_25_upload(): Incorrect req size, "
10949 		    "0x%x, should be 0x%x, dataoutsz 0x%x",
10950 		    pt->request_size, pt->sgl_offset,
10951 		    pt->dataout_size));
10952 	}
10953 	if (pt->data_size < sizeof (MPI2_FW_UPLOAD_REPLY)) {
10954 		NDBG15(("mpi_pre_fw_25_upload(): Incorrect rep size, "
10955 		    "0x%x, should be 0x%x", pt->data_size,
10956 		    (int)sizeof (MPI2_FW_UPLOAD_REPLY)));
10957 	}
10958 }
10959 
10960 /*
10961  * Prepare the pt for an IOC_FACTS request.
10962  */
10963 static void
10964 mpi_pre_ioc_facts(mptsas_t *mpt, mptsas_pt_request_t *pt)
10965 {
10966 #ifndef __lock_lint
10967 	_NOTE(ARGUNUSED(mpt))
10968 #endif
10969 	if (pt->request_size != sizeof (MPI2_IOC_FACTS_REQUEST)) {
10970 		NDBG15(("mpi_pre_ioc_facts(): Incorrect req size, "
10971 		    "0x%x, should be 0x%x, dataoutsz 0x%x",
10972 		    pt->request_size,
10973 		    (int)sizeof (MPI2_IOC_FACTS_REQUEST),
10974 		    pt->dataout_size));
10975 	}
10976 	if (pt->data_size != sizeof (MPI2_IOC_FACTS_REPLY)) {
10977 		NDBG15(("mpi_pre_ioc_facts(): Incorrect rep size, "
10978 		    "0x%x, should be 0x%x", pt->data_size,
10979 		    (int)sizeof (MPI2_IOC_FACTS_REPLY)));
10980 	}
10981 	pt->sgl_offset = (uint16_t)pt->request_size;
10982 }
10983 
10984 /*
10985  * Prepare the pt for a PORT_FACTS request.
10986  */
10987 static void
10988 mpi_pre_port_facts(mptsas_t *mpt, mptsas_pt_request_t *pt)
10989 {
10990 #ifndef __lock_lint
10991 	_NOTE(ARGUNUSED(mpt))
10992 #endif
10993 	if (pt->request_size != sizeof (MPI2_PORT_FACTS_REQUEST)) {
10994 		NDBG15(("mpi_pre_port_facts(): Incorrect req size, "
10995 		    "0x%x, should be 0x%x, dataoutsz 0x%x",
10996 		    pt->request_size,
10997 		    (int)sizeof (MPI2_PORT_FACTS_REQUEST),
10998 		    pt->dataout_size));
10999 	}
11000 	if (pt->data_size != sizeof (MPI2_PORT_FACTS_REPLY)) {
11001 		NDBG15(("mpi_pre_port_facts(): Incorrect rep size, "
11002 		    "0x%x, should be 0x%x", pt->data_size,
11003 		    (int)sizeof (MPI2_PORT_FACTS_REPLY)));
11004 	}
11005 	pt->sgl_offset = (uint16_t)pt->request_size;
11006 }
11007 
11008 /*
11009  * Prepare pt for a SATA_PASSTHROUGH request.
11010  */
11011 static void
11012 mpi_pre_sata_passthrough(mptsas_t *mpt, mptsas_pt_request_t *pt)
11013 {
11014 #ifndef __lock_lint
11015 	_NOTE(ARGUNUSED(mpt))
11016 #endif
11017 	pt->sgl_offset = offsetof(MPI2_SATA_PASSTHROUGH_REQUEST, SGL);
11018 	if (pt->request_size != pt->sgl_offset) {
11019 		NDBG15(("mpi_pre_sata_passthrough(): Incorrect req size, "
11020 		    "0x%x, should be 0x%x, dataoutsz 0x%x",
11021 		    pt->request_size, pt->sgl_offset,
11022 		    pt->dataout_size));
11023 	}
11024 	if (pt->data_size != sizeof (MPI2_SATA_PASSTHROUGH_REPLY)) {
11025 		NDBG15(("mpi_pre_sata_passthrough(): Incorrect rep size, "
11026 		    "0x%x, should be 0x%x", pt->data_size,
11027 		    (int)sizeof (MPI2_SATA_PASSTHROUGH_REPLY)));
11028 	}
11029 }
11030 
11031 static void
11032 mpi_pre_smp_passthrough(mptsas_t *mpt, mptsas_pt_request_t *pt)
11033 {
11034 #ifndef __lock_lint
11035 	_NOTE(ARGUNUSED(mpt))
11036 #endif
11037 	pt->sgl_offset = offsetof(MPI2_SMP_PASSTHROUGH_REQUEST, SGL);
11038 	if (pt->request_size != pt->sgl_offset) {
11039 		NDBG15(("mpi_pre_smp_passthrough(): Incorrect req size, "
11040 		    "0x%x, should be 0x%x, dataoutsz 0x%x",
11041 		    pt->request_size, pt->sgl_offset,
11042 		    pt->dataout_size));
11043 	}
11044 	if (pt->data_size != sizeof (MPI2_SMP_PASSTHROUGH_REPLY)) {
11045 		NDBG15(("mpi_pre_smp_passthrough(): Incorrect rep size, "
11046 		    "0x%x, should be 0x%x", pt->data_size,
11047 		    (int)sizeof (MPI2_SMP_PASSTHROUGH_REPLY)));
11048 	}
11049 }
11050 
11051 /*
11052  * Prepare pt for a CONFIG request.
11053  */
11054 static void
11055 mpi_pre_config(mptsas_t *mpt, mptsas_pt_request_t *pt)
11056 {
11057 #ifndef __lock_lint
11058 	_NOTE(ARGUNUSED(mpt))
11059 #endif
11060 	pt->sgl_offset = offsetof(MPI2_CONFIG_REQUEST, PageBufferSGE);
11061 	if (pt->request_size != pt->sgl_offset) {
11062 		NDBG15(("mpi_pre_config(): Incorrect req size, 0x%x, "
11063 		    "should be 0x%x, dataoutsz 0x%x", pt->request_size,
11064 		    pt->sgl_offset, pt->dataout_size));
11065 	}
11066 	if (pt->data_size != sizeof (MPI2_CONFIG_REPLY)) {
11067 		NDBG15(("mpi_pre_config(): Incorrect rep size, 0x%x, "
11068 		    "should be 0x%x", pt->data_size,
11069 		    (int)sizeof (MPI2_CONFIG_REPLY)));
11070 	}
11071 	pt->simple = 1;
11072 }
11073 
11074 /*
11075  * Prepare pt for a SCSI_IO_REQ request.
11076  */
11077 static void
11078 mpi_pre_scsi_io_req(mptsas_t *mpt, mptsas_pt_request_t *pt)
11079 {
11080 #ifndef __lock_lint
11081 	_NOTE(ARGUNUSED(mpt))
11082 #endif
11083 	pt->sgl_offset = offsetof(MPI2_SCSI_IO_REQUEST, SGL);
11084 	if (pt->request_size != pt->sgl_offset) {
11085 		NDBG15(("mpi_pre_scsi_io_req(): Incorrect req size, 0x%x, "
11086 		    "should be 0x%x, dataoutsz 0x%x", pt->request_size,
11087 		    pt->sgl_offset,
11088 		    pt->dataout_size));
11089 	}
11090 	if (pt->data_size != sizeof (MPI2_SCSI_IO_REPLY)) {
11091 		NDBG15(("mpi_pre_scsi_io_req(): Incorrect rep size, 0x%x, "
11092 		    "should be 0x%x", pt->data_size,
11093 		    (int)sizeof (MPI2_SCSI_IO_REPLY)));
11094 	}
11095 }
11096 
11097 /*
11098  * Prepare the mptsas_cmd for a SAS_IO_UNIT_CONTROL request.
11099  */
11100 static void
11101 mpi_pre_sas_io_unit_control(mptsas_t *mpt, mptsas_pt_request_t *pt)
11102 {
11103 #ifndef __lock_lint
11104 	_NOTE(ARGUNUSED(mpt))
11105 #endif
11106 	pt->sgl_offset = (uint16_t)pt->request_size;
11107 }
11108 
11109 /*
11110  * A set of functions to prepare an mptsas_cmd for the various
11111  * supported requests.
11112  */
11113 static struct mptsas_func {
11114 	U8		Function;
11115 	char		*Name;
11116 	mptsas_pre_f	*f_pre;
11117 } mptsas_func_list[] = {
11118 	{ MPI2_FUNCTION_IOC_FACTS, "IOC_FACTS",		mpi_pre_ioc_facts },
11119 	{ MPI2_FUNCTION_PORT_FACTS, "PORT_FACTS",	mpi_pre_port_facts },
11120 	{ MPI2_FUNCTION_FW_DOWNLOAD, "FW_DOWNLOAD",	mpi_pre_fw_download },
11121 	{ MPI2_FUNCTION_FW_UPLOAD, "FW_UPLOAD",		mpi_pre_fw_upload },
11122 	{ MPI2_FUNCTION_SATA_PASSTHROUGH, "SATA_PASSTHROUGH",
11123 	    mpi_pre_sata_passthrough },
11124 	{ MPI2_FUNCTION_SMP_PASSTHROUGH, "SMP_PASSTHROUGH",
11125 	    mpi_pre_smp_passthrough},
11126 	{ MPI2_FUNCTION_SCSI_IO_REQUEST, "SCSI_IO_REQUEST",
11127 	    mpi_pre_scsi_io_req},
11128 	{ MPI2_FUNCTION_CONFIG, "CONFIG",		mpi_pre_config},
11129 	{ MPI2_FUNCTION_SAS_IO_UNIT_CONTROL, "SAS_IO_UNIT_CONTROL",
11130 	    mpi_pre_sas_io_unit_control },
11131 	{ 0xFF, NULL,				NULL } /* list end */
11132 };
11133 
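/*
 * Look up the request's MPI2 function in mptsas_func_list and let the
 * matching prep routine set pt->sgl_offset (and pt->simple where needed).
 * Unknown functions default sgl_offset to the full request size, so the
 * SGE is appended directly after the user-supplied request.
 */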
11134 static void
11135 mptsas_prep_sgl_offset(mptsas_t *mpt, mptsas_pt_request_t *pt)
11136 {
11137 	pMPI2RequestHeader_t	hdr;
11138 	struct mptsas_func	*f;
11139 
11140 	hdr = (pMPI2RequestHeader_t)pt->request;
11141 
11142 	for (f = mptsas_func_list; f->f_pre != NULL; f++) {
11143 		if (hdr->Function == f->Function) {
11144 			f->f_pre(mpt, pt);
11145 			NDBG15(("mptsas_prep_sgl_offset: Function %s,"
11146 			    " sgl_offset 0x%x", f->Name,
11147 			    pt->sgl_offset));
11148 			return;
11149 		}
11150 	}
11151 	NDBG15(("mptsas_prep_sgl_offset: Unknown Function 0x%02x,"
11152 	    " returning req_size 0x%x for sgl_offset",
11153 	    hdr->Function, pt->request_size));
11154 	pt->sgl_offset = (uint16_t)pt->request_size;
11155 }
11156 
11157 
11158 static int
11159 mptsas_do_passthru(mptsas_t *mpt, uint8_t *request, uint8_t *reply,
11160     uint8_t *data, uint32_t request_size, uint32_t reply_size,
11161     uint32_t data_size, uint32_t direction, uint8_t *dataout,
11162     uint32_t dataout_size, short timeout, int mode)
11163 {
11164 	mptsas_pt_request_t		pt;
11165 	mptsas_dma_alloc_state_t	data_dma_state;
11166 	mptsas_dma_alloc_state_t	dataout_dma_state;
11167 	caddr_t				memp;
11168 	mptsas_cmd_t			*cmd = NULL;
11169 	struct scsi_pkt			*pkt;
11170 	uint32_t			reply_len = 0, sense_len = 0;
11171 	pMPI2RequestHeader_t		request_hdrp;
11172 	pMPI2RequestHeader_t		request_msg;
11173 	pMPI2DefaultReply_t		reply_msg;
11174 	Mpi2SCSIIOReply_t		rep_msg;
11175 	int				rvalue;
11176 	int				i, status = 0, pt_flags = 0, rv = 0;
11177 	uint8_t				function;
11178 
11179 	ASSERT(mutex_owned(&mpt->m_mutex));
11180 
11181 	reply_msg = (pMPI2DefaultReply_t)(&rep_msg);
11182 	bzero(reply_msg, sizeof (MPI2_DEFAULT_REPLY));
11183 	request_msg = kmem_zalloc(request_size, KM_SLEEP);
11184 
11185 	mutex_exit(&mpt->m_mutex);
11186 	/*
11187 	 * Copy in the request buffer since it could be used by
11188 	 * another thread while the passthru request sits on the waitq.
11189 	 */
11190 	if (ddi_copyin(request, request_msg, request_size, mode)) {
11191 		mutex_enter(&mpt->m_mutex);
11192 		status = EFAULT;
11193 		mptsas_log(mpt, CE_WARN, "failed to copy request data");
11194 		goto out;
11195 	}
11196 	NDBG27(("mptsas_do_passthru: mode 0x%x, size 0x%x, Func 0x%x",
11197 	    mode, request_size, request_msg->Function));
11198 	mutex_enter(&mpt->m_mutex);
11199 
11200 	function = request_msg->Function;
11201 	if (function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
11202 		pMpi2SCSITaskManagementRequest_t	task;
11203 		task = (pMpi2SCSITaskManagementRequest_t)request_msg;
11204 		mptsas_setup_bus_reset_delay(mpt);
11205 		rv = mptsas_ioc_task_management(mpt, task->TaskType,
11206 		    task->DevHandle, (int)task->LUN[1], reply, reply_size,
11207 		    mode);
11208 
11209 		if (rv != TRUE) {
11210 			status = EIO;
11211 			mptsas_log(mpt, CE_WARN, "task management failed");
11212 		}
11213 		goto out;
11214 	}
11215 
11216 	if (data_size != 0) {
11217 		data_dma_state.size = data_size;
11218 		if (mptsas_dma_alloc(mpt, &data_dma_state) != DDI_SUCCESS) {
11219 			status = ENOMEM;
11220 			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
11221 			    "resource");
11222 			goto out;
11223 		}
11224 		pt_flags |= MPTSAS_DATA_ALLOCATED;
11225 		if (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE) {
11226 			mutex_exit(&mpt->m_mutex);
11227 			for (i = 0; i < data_size; i++) {
11228 				if (ddi_copyin(data + i, (uint8_t *)
11229 				    data_dma_state.memp + i, 1, mode)) {
11230 					mutex_enter(&mpt->m_mutex);
11231 					status = EFAULT;
11232 					mptsas_log(mpt, CE_WARN, "failed to "
11233 					    "copy read data");
11234 					goto out;
11235 				}
11236 			}
11237 			mutex_enter(&mpt->m_mutex);
11238 		}
11239 	} else {
11240 		bzero(&data_dma_state, sizeof (data_dma_state));
11241 	}
11242 
11243 	if (dataout_size != 0) {
11244 		dataout_dma_state.size = dataout_size;
11245 		if (mptsas_dma_alloc(mpt, &dataout_dma_state) != DDI_SUCCESS) {
11246 			status = ENOMEM;
11247 			mptsas_log(mpt, CE_WARN, "failed to alloc DMA "
11248 			    "resource");
11249 			goto out;
11250 		}
11251 		pt_flags |= MPTSAS_DATAOUT_ALLOCATED;
11252 		mutex_exit(&mpt->m_mutex);
11253 		for (i = 0; i < dataout_size; i++) {
11254 			if (ddi_copyin(dataout + i, (uint8_t *)
11255 			    dataout_dma_state.memp + i, 1, mode)) {
11256 				mutex_enter(&mpt->m_mutex);
11257 				mptsas_log(mpt, CE_WARN, "failed to copy out"
11258 				    " data");
11259 				status = EFAULT;
11260 				goto out;
11261 			}
11262 		}
11263 		mutex_enter(&mpt->m_mutex);
11264 	} else {
11265 		bzero(&dataout_dma_state, sizeof (dataout_dma_state));
11266 	}
11267 
11268 	if ((rvalue = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
11269 		status = EAGAIN;
11270 		mptsas_log(mpt, CE_NOTE, "passthru command pool is full");
11271 		goto out;
11272 	}
11273 	pt_flags |= MPTSAS_REQUEST_POOL_CMD;
11274 
11275 	bzero((caddr_t)cmd, sizeof (*cmd));
11276 	bzero((caddr_t)pkt, scsi_pkt_size());
11277 	bzero((caddr_t)&pt, sizeof (pt));
11278 
11279 	cmd->ioc_cmd_slot = (uint32_t)(rvalue);
11280 
11281 	pt.request = (uint8_t *)request_msg;
11282 	pt.direction = direction;
11283 	pt.simple = 0;
11284 	pt.request_size = request_size;
11285 	pt.data_size = data_size;
11286 	pt.dataout_size = dataout_size;
11287 	pt.data_cookie = data_dma_state.cookie;
11288 	pt.dataout_cookie = dataout_dma_state.cookie;
11289 	mptsas_prep_sgl_offset(mpt, &pt);
11290 
11291 	/*
11292 	 * Form a blank cmd/pkt to track the passthru request
11293 	 */
11294 	pkt->pkt_cdbp		= (opaque_t)&cmd->cmd_cdb[0];
11295 	pkt->pkt_scbp		= (opaque_t)&cmd->cmd_scb;
11296 	pkt->pkt_ha_private	= (opaque_t)&pt;
11297 	pkt->pkt_flags		= FLAG_HEAD;
11298 	pkt->pkt_time		= timeout;
11299 	cmd->cmd_pkt		= pkt;
11300 	cmd->cmd_flags		= CFLAG_CMDIOC | CFLAG_PASSTHRU;
11301 
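	/*
	 * For SCSI IO and RAID passthrough requests the CDB length is
	 * derived from the CDB group code, and the caller's reply buffer
	 * is laid out as the fixed-size SCSI IO reply followed by sense
	 * data, so whatever remains after the reply is used for sense.
	 */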
11302 	if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
11303 	    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
11304 		uint8_t			com, cdb_group_id;
11305 		boolean_t		ret;
11306 
11307 		pkt->pkt_cdbp = ((pMpi2SCSIIORequest_t)request_msg)->CDB.CDB32;
11308 		com = pkt->pkt_cdbp[0];
11309 		cdb_group_id = CDB_GROUPID(com);
11310 		switch (cdb_group_id) {
11311 		case CDB_GROUPID_0: cmd->cmd_cdblen = CDB_GROUP0; break;
11312 		case CDB_GROUPID_1: cmd->cmd_cdblen = CDB_GROUP1; break;
11313 		case CDB_GROUPID_2: cmd->cmd_cdblen = CDB_GROUP2; break;
11314 		case CDB_GROUPID_4: cmd->cmd_cdblen = CDB_GROUP4; break;
11315 		case CDB_GROUPID_5: cmd->cmd_cdblen = CDB_GROUP5; break;
11316 		default:
11317 			NDBG27(("mptsas_do_passthru: SCSI_IO, reserved "
11318 			    "CDBGROUP 0x%x requested!", cdb_group_id));
11319 			break;
11320 		}
11321 
11322 		reply_len = sizeof (MPI2_SCSI_IO_REPLY);
11323 		sense_len = reply_size - reply_len;
11324 		ret = mptsas_cmdarqsize(mpt, cmd, sense_len, KM_SLEEP);
11325 		VERIFY(ret == B_TRUE);
11326 	} else {
11327 		reply_len = reply_size;
11328 		sense_len = 0;
11329 	}
11330 
11331 	NDBG27(("mptsas_do_passthru: %s, dsz 0x%x, dosz 0x%x, replen 0x%x, "
11332 	    "snslen 0x%x",
11333 	    (direction == MPTSAS_PASS_THRU_DIRECTION_WRITE)?"Write":"Read",
11334 	    data_size, dataout_size, reply_len, sense_len));
11335 
11336 	/*
11337 	 * Save the command in a slot
11338 	 */
11339 	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
11340 		/*
11341 		 * Once the passthru command gets a slot, mark it
11342 		 * CFLAG_PREPARED in cmd_flags.
11343 		 */
11344 		cmd->cmd_flags |= CFLAG_PREPARED;
11345 		mptsas_start_passthru(mpt, cmd);
11346 	} else {
11347 		mptsas_waitq_add(mpt, cmd);
11348 	}
11349 
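	/*
	 * Block until the completion path (or the watchdog, on a timeout)
	 * marks the command CFLAG_FINISHED and broadcasts m_passthru_cv.
	 */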
11350 	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
11351 		cv_wait(&mpt->m_passthru_cv, &mpt->m_mutex);
11352 	}
11353 
11354 	NDBG27(("mptsas_do_passthru: Cmd complete, flags 0x%x, rfm 0x%x "
11355 	    "pktreason 0x%x", cmd->cmd_flags, cmd->cmd_rfm,
11356 	    pkt->pkt_reason));
11357 
11358 	if (cmd->cmd_flags & CFLAG_PREPARED) {
11359 		memp = mpt->m_req_frame + (mpt->m_req_frame_size *
11360 		    cmd->cmd_slot);
11361 		request_hdrp = (pMPI2RequestHeader_t)memp;
11362 	}
11363 
11364 	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
11365 		status = ETIMEDOUT;
11366 		mptsas_log(mpt, CE_WARN, "passthrough command timeout");
11367 		pt_flags |= MPTSAS_CMD_TIMEOUT;
11368 		goto out;
11369 	}
11370 
11371 	if (cmd->cmd_rfm) {
11372 		/*
11373 		 * A zero cmd_rfm means the command completed with a CONTEXT
11374 		 * reply, so no reply message frame is used and no PCI write
11375 		 * is needed to post a free reply SMFA.  A non-zero cmd_rfm
11376 		 * means the reply is an ADDRESS reply and a reply message
11377 		 * frame is in use.
11378 		 */
11379 		pt_flags |= MPTSAS_ADDRESS_REPLY;
11380 		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
11381 		    DDI_DMA_SYNC_FORCPU);
11382 		reply_msg = (pMPI2DefaultReply_t)
11383 		    (mpt->m_reply_frame + (cmd->cmd_rfm -
11384 		    (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
11385 	}
11386 
11387 	mptsas_fma_check(mpt, cmd);
11388 	if (pkt->pkt_reason == CMD_TRAN_ERR) {
11389 		status = EAGAIN;
11390 		mptsas_log(mpt, CE_WARN, "passthru fma error");
11391 		goto out;
11392 	}
11393 	if (pkt->pkt_reason == CMD_RESET) {
11394 		status = EAGAIN;
11395 		mptsas_log(mpt, CE_WARN, "ioc reset abort passthru");
11396 		goto out;
11397 	}
11398 
11399 	if (pkt->pkt_reason == CMD_INCOMPLETE) {
11400 		status = EIO;
11401 		mptsas_log(mpt, CE_WARN, "passthrough command incomplete");
11402 		goto out;
11403 	}
11404 
11405 	mutex_exit(&mpt->m_mutex);
11406 	if (cmd->cmd_flags & CFLAG_PREPARED) {
11407 		function = request_hdrp->Function;
11408 		if ((function == MPI2_FUNCTION_SCSI_IO_REQUEST) ||
11409 		    (function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
11410 			reply_len = sizeof (MPI2_SCSI_IO_REPLY);
11411 			sense_len = cmd->cmd_extrqslen ?
11412 			    min(sense_len, cmd->cmd_extrqslen) :
11413 			    min(sense_len, cmd->cmd_rqslen);
11414 		} else {
11415 			reply_len = reply_size;
11416 			sense_len = 0;
11417 		}
11418 
11419 		for (i = 0; i < reply_len; i++) {
11420 			if (ddi_copyout((uint8_t *)reply_msg + i, reply + i, 1,
11421 			    mode)) {
11422 				mutex_enter(&mpt->m_mutex);
11423 				status = EFAULT;
11424 				mptsas_log(mpt, CE_WARN, "failed to copy out "
11425 				    "reply data");
11426 				goto out;
11427 			}
11428 		}
11429 		for (i = 0; i < sense_len; i++) {
11430 			if (ddi_copyout((uint8_t *)request_hdrp + 64 + i,
11431 			    reply + reply_len + i, 1, mode)) {
11432 				mutex_enter(&mpt->m_mutex);
11433 				status = EFAULT;
11434 				mptsas_log(mpt, CE_WARN, "failed to copy out "
11435 				    "sense data");
11436 				goto out;
11437 			}
11438 		}
11439 	}
11440 
11441 	if (data_size) {
11442 		if (direction != MPTSAS_PASS_THRU_DIRECTION_WRITE) {
11443 			(void) ddi_dma_sync(data_dma_state.handle, 0, 0,
11444 			    DDI_DMA_SYNC_FORCPU);
11445 			for (i = 0; i < data_size; i++) {
11446 				if (ddi_copyout((uint8_t *)(
11447 				    data_dma_state.memp + i), data + i,  1,
11448 				    mode)) {
11449 					mutex_enter(&mpt->m_mutex);
11450 					status = EFAULT;
11451 					mptsas_log(mpt, CE_WARN, "failed to "
11452 					    "copy out the reply data");
11453 					goto out;
11454 				}
11455 			}
11456 		}
11457 	}
11458 	mutex_enter(&mpt->m_mutex);
11459 out:
11460 	/*
11461 	 * Put the reply frame back on the free queue, increment the free
11462 	 * index, and write the new index to the free index register.  But only
11463 	 * if this reply is an ADDRESS reply.
11464 	 */
11465 	if (pt_flags & MPTSAS_ADDRESS_REPLY) {
11466 		ddi_put32(mpt->m_acc_free_queue_hdl,
11467 		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
11468 		    cmd->cmd_rfm);
11469 		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
11470 		    DDI_DMA_SYNC_FORDEV);
11471 		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
11472 			mpt->m_free_index = 0;
11473 		}
11474 		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
11475 		    mpt->m_free_index);
11476 	}
11477 	if (cmd) {
11478 		if (cmd->cmd_extrqslen != 0) {
11479 			rmfree(mpt->m_erqsense_map, cmd->cmd_extrqschunks,
11480 			    cmd->cmd_extrqsidx + 1);
11481 		}
11482 		if (cmd->cmd_flags & CFLAG_PREPARED) {
11483 			mptsas_remove_cmd(mpt, cmd);
11484 			pt_flags &= (~MPTSAS_REQUEST_POOL_CMD);
11485 		}
11486 	}
11487 	if (pt_flags & MPTSAS_REQUEST_POOL_CMD)
11488 		mptsas_return_to_pool(mpt, cmd);
11489 	if (pt_flags & MPTSAS_DATA_ALLOCATED) {
11490 		if (mptsas_check_dma_handle(data_dma_state.handle) !=
11491 		    DDI_SUCCESS) {
11492 			ddi_fm_service_impact(mpt->m_dip,
11493 			    DDI_SERVICE_UNAFFECTED);
11494 			status = EFAULT;
11495 		}
11496 		mptsas_dma_free(&data_dma_state);
11497 	}
11498 	if (pt_flags & MPTSAS_DATAOUT_ALLOCATED) {
11499 		if (mptsas_check_dma_handle(dataout_dma_state.handle) !=
11500 		    DDI_SUCCESS) {
11501 			ddi_fm_service_impact(mpt->m_dip,
11502 			    DDI_SERVICE_UNAFFECTED);
11503 			status = EFAULT;
11504 		}
11505 		mptsas_dma_free(&dataout_dma_state);
11506 	}
11507 	if (pt_flags & MPTSAS_CMD_TIMEOUT) {
11508 		if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
11509 			mptsas_log(mpt, CE_WARN, "mptsas_restart_ioc failed");
11510 		}
11511 	}
11512 	if (request_msg)
11513 		kmem_free(request_msg, request_size);
11514 	NDBG27(("mptsas_do_passthru: Done status 0x%x", status));
11515 
11516 	return (status);
11517 }
11518 
11519 static int
11520 mptsas_pass_thru(mptsas_t *mpt, mptsas_pass_thru_t *data, int mode)
11521 {
11522 	/*
11523 	 * If timeout is 0, set timeout to default of 60 seconds.
11524 	 */
11525 	if (data->Timeout == 0) {
11526 		data->Timeout = MPTSAS_PASS_THRU_TIME_DEFAULT;
11527 	}
11528 
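	/*
	 * A request is valid either with no data and direction NONE, or
	 * with data and a direction of READ, WRITE, or BOTH (the latter
	 * only when DataOutSize is non-zero).  BOTH is normalized to READ
	 * plus a separate data-out buffer; otherwise DataOutSize is
	 * forced to zero.
	 */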
11529 	if (((data->DataSize == 0) &&
11530 	    (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_NONE)) ||
11531 	    ((data->DataSize != 0) &&
11532 	    ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_READ) ||
11533 	    (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_WRITE) ||
11534 	    ((data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) &&
11535 	    (data->DataOutSize != 0))))) {
11536 		if (data->DataDirection == MPTSAS_PASS_THRU_DIRECTION_BOTH) {
11537 			data->DataDirection = MPTSAS_PASS_THRU_DIRECTION_READ;
11538 		} else {
11539 			data->DataOutSize = 0;
11540 		}
11541 		/*
11542 		 * Send passthru request messages
11543 		 */
11544 		return (mptsas_do_passthru(mpt,
11545 		    (uint8_t *)((uintptr_t)data->PtrRequest),
11546 		    (uint8_t *)((uintptr_t)data->PtrReply),
11547 		    (uint8_t *)((uintptr_t)data->PtrData),
11548 		    data->RequestSize, data->ReplySize,
11549 		    data->DataSize, data->DataDirection,
11550 		    (uint8_t *)((uintptr_t)data->PtrDataOut),
11551 		    data->DataOutSize, data->Timeout, mode));
11552 	} else {
11553 		return (EINVAL);
11554 	}
11555 }
11556 
11557 static uint8_t
11558 mptsas_get_fw_diag_buffer_number(mptsas_t *mpt, uint32_t unique_id)
11559 {
11560 	uint8_t	index;
11561 
11562 	for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) {
11563 		if (mpt->m_fw_diag_buffer_list[index].unique_id == unique_id) {
11564 			return (index);
11565 		}
11566 	}
11567 
11568 	return (MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND);
11569 }
11570 
11571 static void
11572 mptsas_start_diag(mptsas_t *mpt, mptsas_cmd_t *cmd)
11573 {
11574 	pMpi2DiagBufferPostRequest_t	pDiag_post_msg;
11575 	pMpi2DiagReleaseRequest_t	pDiag_release_msg;
11576 	struct scsi_pkt			*pkt = cmd->cmd_pkt;
11577 	mptsas_diag_request_t		*diag = pkt->pkt_ha_private;
11578 	uint32_t			i;
11579 	uint64_t			request_desc;
11580 
11581 	ASSERT(mutex_owned(&mpt->m_mutex));
11582 
11583 	/*
11584 	 * Form the diag message depending on the post or release function.
11585 	 */
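	/*
	 * A DIAG_BUFFER_POST hands the firmware the buffer type, length,
	 * product-specific words, and the host buffer's DMA address; a
	 * DIAG_RELEASE only needs the buffer type being released.
	 */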
11586 	if (diag->function == MPI2_FUNCTION_DIAG_BUFFER_POST) {
11587 		pDiag_post_msg = (pMpi2DiagBufferPostRequest_t)
11588 		    (mpt->m_req_frame + (mpt->m_req_frame_size *
11589 		    cmd->cmd_slot));
11590 		bzero(pDiag_post_msg, mpt->m_req_frame_size);
11591 		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->Function,
11592 		    diag->function);
11593 		ddi_put8(mpt->m_acc_req_frame_hdl, &pDiag_post_msg->BufferType,
11594 		    diag->pBuffer->buffer_type);
11595 		ddi_put8(mpt->m_acc_req_frame_hdl,
11596 		    &pDiag_post_msg->ExtendedType,
11597 		    diag->pBuffer->extended_type);
11598 		ddi_put32(mpt->m_acc_req_frame_hdl,
11599 		    &pDiag_post_msg->BufferLength,
11600 		    diag->pBuffer->buffer_data.size);
11601 		for (i = 0; i < (sizeof (pDiag_post_msg->ProductSpecific) / 4);
11602 		    i++) {
11603 			ddi_put32(mpt->m_acc_req_frame_hdl,
11604 			    &pDiag_post_msg->ProductSpecific[i],
11605 			    diag->pBuffer->product_specific[i]);
11606 		}
11607 		ddi_put32(mpt->m_acc_req_frame_hdl,
11608 		    &pDiag_post_msg->BufferAddress.Low,
11609 		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
11610 		    & 0xffffffffull));
11611 		ddi_put32(mpt->m_acc_req_frame_hdl,
11612 		    &pDiag_post_msg->BufferAddress.High,
11613 		    (uint32_t)(diag->pBuffer->buffer_data.cookie.dmac_laddress
11614 		    >> 32));
11615 	} else {
11616 		pDiag_release_msg = (pMpi2DiagReleaseRequest_t)
11617 		    (mpt->m_req_frame + (mpt->m_req_frame_size *
11618 		    cmd->cmd_slot));
11619 		bzero(pDiag_release_msg, mpt->m_req_frame_size);
11620 		ddi_put8(mpt->m_acc_req_frame_hdl,
11621 		    &pDiag_release_msg->Function, diag->function);
11622 		ddi_put8(mpt->m_acc_req_frame_hdl,
11623 		    &pDiag_release_msg->BufferType,
11624 		    diag->pBuffer->buffer_type);
11625 	}
11626 
11627 	/*
11628 	 * Send the message
11629 	 */
11630 	(void) ddi_dma_sync(mpt->m_dma_req_frame_hdl, 0, 0,
11631 	    DDI_DMA_SYNC_FORDEV);
11632 	request_desc = (cmd->cmd_slot << 16) +
11633 	    MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
11634 	cmd->cmd_rfm = 0;
11635 	MPTSAS_START_CMD(mpt, request_desc);
11636 	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
11637 	    DDI_SUCCESS) ||
11638 	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
11639 	    DDI_SUCCESS)) {
11640 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
11641 	}
11642 }
11643 
11644 static int
11645 mptsas_post_fw_diag_buffer(mptsas_t *mpt,
11646     mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code)
11647 {
11648 	mptsas_diag_request_t		diag;
11649 	int				status, slot_num, post_flags = 0;
11650 	mptsas_cmd_t			*cmd = NULL;
11651 	struct scsi_pkt			*pkt;
11652 	pMpi2DiagBufferPostReply_t	reply;
11653 	uint16_t			iocstatus;
11654 	uint32_t			iocloginfo, transfer_length;
11655 
11656 	/*
11657 	 * If buffer is not enabled, just leave.
11658 	 */
11659 	*return_code = MPTSAS_FW_DIAG_ERROR_POST_FAILED;
11660 	if (!pBuffer->enabled) {
11661 		status = DDI_FAILURE;
11662 		goto out;
11663 	}
11664 
11665 	/*
11666 	 * Clear some flags initially.
11667 	 */
11668 	pBuffer->force_release = FALSE;
11669 	pBuffer->valid_data = FALSE;
11670 	pBuffer->owned_by_firmware = FALSE;
11671 
11672 	/*
11673 	 * Get a cmd buffer from the cmd buffer pool
11674 	 */
11675 	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
11676 		status = DDI_FAILURE;
11677 		mptsas_log(mpt, CE_NOTE, "command pool is full: Post FW Diag");
11678 		goto out;
11679 	}
11680 	post_flags |= MPTSAS_REQUEST_POOL_CMD;
11681 
11682 	bzero((caddr_t)cmd, sizeof (*cmd));
11683 	bzero((caddr_t)pkt, scsi_pkt_size());
11684 
11685 	cmd->ioc_cmd_slot = (uint32_t)(slot_num);
11686 
11687 	diag.pBuffer = pBuffer;
11688 	diag.function = MPI2_FUNCTION_DIAG_BUFFER_POST;
11689 
11690 	/*
11691 	 * Form a blank cmd/pkt to store the acknowledgement message
11692 	 */
11693 	pkt->pkt_ha_private	= (opaque_t)&diag;
11694 	pkt->pkt_flags		= FLAG_HEAD;
11695 	pkt->pkt_time		= 60;
11696 	cmd->cmd_pkt		= pkt;
11697 	cmd->cmd_flags		= CFLAG_CMDIOC | CFLAG_FW_DIAG;
11698 
11699 	/*
11700 	 * Save the command in a slot
11701 	 */
11702 	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
11703 		/*
11704 		 * Once the command gets a slot, mark it CFLAG_PREPARED
11705 		 * in cmd_flags.
11706 		 */
11707 		cmd->cmd_flags |= CFLAG_PREPARED;
11708 		mptsas_start_diag(mpt, cmd);
11709 	} else {
11710 		mptsas_waitq_add(mpt, cmd);
11711 	}
11712 
11713 	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
11714 		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
11715 	}
11716 
11717 	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
11718 		status = DDI_FAILURE;
11719 		mptsas_log(mpt, CE_WARN, "Post FW Diag command timeout");
11720 		goto out;
11721 	}
11722 
11723 	/*
11724 	 * cmd_rfm points to the reply message if a reply was given.  Check the
11725 	 * IOCStatus to make sure everything went OK with the FW diag request
11726 	 * and set buffer flags.
11727 	 */
11728 	if (cmd->cmd_rfm) {
11729 		post_flags |= MPTSAS_ADDRESS_REPLY;
11730 		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
11731 		    DDI_DMA_SYNC_FORCPU);
11732 		reply = (pMpi2DiagBufferPostReply_t)(mpt->m_reply_frame +
11733 		    (cmd->cmd_rfm -
11734 		    (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
11735 
11736 		/*
11737 		 * Get the reply message data
11738 		 */
11739 		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
11740 		    &reply->IOCStatus);
11741 		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
11742 		    &reply->IOCLogInfo);
11743 		transfer_length = ddi_get32(mpt->m_acc_reply_frame_hdl,
11744 		    &reply->TransferLength);
11745 
11746 		/*
11747 		 * If the post failed, quit.
11748 		 */
11749 		if (iocstatus != MPI2_IOCSTATUS_SUCCESS) {
11750 			status = DDI_FAILURE;
11751 			NDBG13(("post FW Diag Buffer failed: IOCStatus=0x%x, "
11752 			    "IOCLogInfo=0x%x, TransferLength=0x%x", iocstatus,
11753 			    iocloginfo, transfer_length));
11754 			goto out;
11755 		}
11756 
11757 		/*
11758 		 * Post was successful.
11759 		 */
11760 		pBuffer->valid_data = TRUE;
11761 		pBuffer->owned_by_firmware = TRUE;
11762 		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
11763 		status = DDI_SUCCESS;
11764 	}
11765 
11766 out:
11767 	/*
11768 	 * Put the reply frame back on the free queue, increment the free
11769 	 * index, and write the new index to the free index register.  But only
11770 	 * if this reply is an ADDRESS reply.
11771 	 */
11772 	if (post_flags & MPTSAS_ADDRESS_REPLY) {
11773 		ddi_put32(mpt->m_acc_free_queue_hdl,
11774 		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
11775 		    cmd->cmd_rfm);
11776 		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
11777 		    DDI_DMA_SYNC_FORDEV);
11778 		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
11779 			mpt->m_free_index = 0;
11780 		}
11781 		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
11782 		    mpt->m_free_index);
11783 	}
11784 	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
11785 		mptsas_remove_cmd(mpt, cmd);
11786 		post_flags &= (~MPTSAS_REQUEST_POOL_CMD);
11787 	}
11788 	if (post_flags & MPTSAS_REQUEST_POOL_CMD) {
11789 		mptsas_return_to_pool(mpt, cmd);
11790 	}
11791 
11792 	return (status);
11793 }
11794 
11795 static int
11796 mptsas_release_fw_diag_buffer(mptsas_t *mpt,
11797     mptsas_fw_diagnostic_buffer_t *pBuffer, uint32_t *return_code,
11798     uint32_t diag_type)
11799 {
11800 	mptsas_diag_request_t	diag;
11801 	int			status, slot_num, rel_flags = 0;
11802 	mptsas_cmd_t		*cmd = NULL;
11803 	struct scsi_pkt		*pkt;
11804 	pMpi2DiagReleaseReply_t	reply;
11805 	uint16_t		iocstatus;
11806 	uint32_t		iocloginfo;
11807 
11808 	/*
11809 	 * If buffer is not enabled, just leave.
11810 	 */
11811 	*return_code = MPTSAS_FW_DIAG_ERROR_RELEASE_FAILED;
11812 	if (!pBuffer->enabled) {
11813 		mptsas_log(mpt, CE_NOTE, "This buffer type is not supported "
11814 		    "by the IOC");
11815 		status = DDI_FAILURE;
11816 		goto out;
11817 	}
11818 
11819 	/*
11820 	 * Clear some flags initially.
11821 	 */
11822 	pBuffer->force_release = FALSE;
11823 	pBuffer->valid_data = FALSE;
11824 	pBuffer->owned_by_firmware = FALSE;
11825 
11826 	/*
11827 	 * Get a cmd buffer from the cmd buffer pool
11828 	 */
11829 	if ((slot_num = (mptsas_request_from_pool(mpt, &cmd, &pkt))) == -1) {
11830 		status = DDI_FAILURE;
11831 		mptsas_log(mpt, CE_NOTE, "command pool is full: Release FW "
11832 		    "Diag");
11833 		goto out;
11834 	}
11835 	rel_flags |= MPTSAS_REQUEST_POOL_CMD;
11836 
11837 	bzero((caddr_t)cmd, sizeof (*cmd));
11838 	bzero((caddr_t)pkt, scsi_pkt_size());
11839 
11840 	cmd->ioc_cmd_slot = (uint32_t)(slot_num);
11841 
11842 	diag.pBuffer = pBuffer;
11843 	diag.function = MPI2_FUNCTION_DIAG_RELEASE;
11844 
11845 	/*
11846 	 * Form a blank cmd/pkt to store the acknowledgement message
11847 	 */
11848 	pkt->pkt_ha_private	= (opaque_t)&diag;
11849 	pkt->pkt_flags		= FLAG_HEAD;
11850 	pkt->pkt_time		= 60;
11851 	cmd->cmd_pkt		= pkt;
11852 	cmd->cmd_flags		= CFLAG_CMDIOC | CFLAG_FW_DIAG;
11853 
11854 	/*
11855 	 * Save the command in a slot
11856 	 */
11857 	if (mptsas_save_cmd(mpt, cmd) == TRUE) {
11858 		/*
11859 		 * Once the command gets a slot, mark it CFLAG_PREPARED
11860 		 * in cmd_flags.
11861 		 */
11862 		cmd->cmd_flags |= CFLAG_PREPARED;
11863 		mptsas_start_diag(mpt, cmd);
11864 	} else {
11865 		mptsas_waitq_add(mpt, cmd);
11866 	}
11867 
11868 	while ((cmd->cmd_flags & CFLAG_FINISHED) == 0) {
11869 		cv_wait(&mpt->m_fw_diag_cv, &mpt->m_mutex);
11870 	}
11871 
11872 	if (cmd->cmd_flags & CFLAG_TIMEOUT) {
11873 		status = DDI_FAILURE;
11874 		mptsas_log(mpt, CE_WARN, "Release FW Diag command timeout");
11875 		goto out;
11876 	}
11877 
11878 	/*
11879 	 * cmd_rfm points to the reply message if a reply was given.  Check the
11880 	 * IOCStatus to make sure everything went OK with the FW diag request
11881 	 * and set buffer flags.
11882 	 */
11883 	if (cmd->cmd_rfm) {
11884 		rel_flags |= MPTSAS_ADDRESS_REPLY;
11885 		(void) ddi_dma_sync(mpt->m_dma_reply_frame_hdl, 0, 0,
11886 		    DDI_DMA_SYNC_FORCPU);
11887 		reply = (pMpi2DiagReleaseReply_t)(mpt->m_reply_frame +
11888 		    (cmd->cmd_rfm -
11889 		    (mpt->m_reply_frame_dma_addr & 0xffffffffu)));
11890 
11891 		/*
11892 		 * Get the reply message data
11893 		 */
11894 		iocstatus = ddi_get16(mpt->m_acc_reply_frame_hdl,
11895 		    &reply->IOCStatus);
11896 		iocloginfo = ddi_get32(mpt->m_acc_reply_frame_hdl,
11897 		    &reply->IOCLogInfo);
11898 
11899 		/*
11900 		 * If the release failed, quit.
11901 		 */
11902 		if ((iocstatus != MPI2_IOCSTATUS_SUCCESS) ||
11903 		    pBuffer->owned_by_firmware) {
11904 			status = DDI_FAILURE;
11905 			NDBG13(("release FW Diag Buffer failed: "
11906 			    "IOCStatus=0x%x, IOCLogInfo=0x%x", iocstatus,
11907 			    iocloginfo));
11908 			goto out;
11909 		}
11910 
11911 		/*
11912 		 * Release was successful.
11913 		 */
11914 		*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
11915 		status = DDI_SUCCESS;
11916 
11917 		/*
11918 		 * If this was for an UNREGISTER diag type command, clear the
11919 		 * unique ID.
11920 		 */
11921 		if (diag_type == MPTSAS_FW_DIAG_TYPE_UNREGISTER) {
11922 			pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
11923 		}
11924 	}
11925 
11926 out:
11927 	/*
11928 	 * Put the reply frame back on the free queue, increment the free
11929 	 * index, and write the new index to the free index register.  But only
11930 	 * if this reply is an ADDRESS reply.
11931 	 */
11932 	if (rel_flags & MPTSAS_ADDRESS_REPLY) {
11933 		ddi_put32(mpt->m_acc_free_queue_hdl,
11934 		    &((uint32_t *)(void *)mpt->m_free_queue)[mpt->m_free_index],
11935 		    cmd->cmd_rfm);
11936 		(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
11937 		    DDI_DMA_SYNC_FORDEV);
11938 		if (++mpt->m_free_index == mpt->m_free_queue_depth) {
11939 			mpt->m_free_index = 0;
11940 		}
11941 		ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex,
11942 		    mpt->m_free_index);
11943 	}
11944 	if (cmd && (cmd->cmd_flags & CFLAG_PREPARED)) {
11945 		mptsas_remove_cmd(mpt, cmd);
11946 		rel_flags &= (~MPTSAS_REQUEST_POOL_CMD);
11947 	}
11948 	if (rel_flags & MPTSAS_REQUEST_POOL_CMD) {
11949 		mptsas_return_to_pool(mpt, cmd);
11950 	}
11951 
11952 	return (status);
11953 }
11954 
11955 static int
11956 mptsas_diag_register(mptsas_t *mpt, mptsas_fw_diag_register_t *diag_register,
11957     uint32_t *return_code)
11958 {
11959 	mptsas_fw_diagnostic_buffer_t	*pBuffer;
11960 	uint8_t				extended_type, buffer_type, i;
11961 	uint32_t			buffer_size;
11962 	uint32_t			unique_id;
11963 	int				status;
11964 
11965 	ASSERT(mutex_owned(&mpt->m_mutex));
11966 
11967 	extended_type = diag_register->ExtendedType;
11968 	buffer_type = diag_register->BufferType;
11969 	buffer_size = diag_register->RequestedBufferSize;
11970 	unique_id = diag_register->UniqueId;
11971 
11972 	/*
11973 	 * Check for valid buffer type
11974 	 */
11975 	if (buffer_type >= MPI2_DIAG_BUF_TYPE_COUNT) {
11976 		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
11977 		return (DDI_FAILURE);
11978 	}
11979 
11980 	/*
11981 	 * Get the current buffer and look up the unique ID.  The unique ID
11982 	 * should not be found.  If it is, the ID is already in use.
11983 	 */
11984 	i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
11985 	pBuffer = &mpt->m_fw_diag_buffer_list[buffer_type];
11986 	if (i != MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
11987 		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11988 		return (DDI_FAILURE);
11989 	}
11990 
11991 	/*
11992 	 * The buffer's unique ID should not be registered yet, and the given
11993 	 * unique ID cannot be 0.
11994 	 */
11995 	if ((pBuffer->unique_id != MPTSAS_FW_DIAG_INVALID_UID) ||
11996 	    (unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
11997 		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
11998 		return (DDI_FAILURE);
11999 	}
12000 
12001 	/*
12002 	 * If this buffer is already posted as immediate, just change owner.
12003 	 */
12004 	if (pBuffer->immediate && pBuffer->owned_by_firmware &&
12005 	    (pBuffer->unique_id == MPTSAS_FW_DIAG_INVALID_UID)) {
12006 		pBuffer->immediate = FALSE;
12007 		pBuffer->unique_id = unique_id;
12008 		return (DDI_SUCCESS);
12009 	}
12010 
12011 	/*
12012 	 * Post a new buffer after checking if it's enabled.  The DMA buffer
12013 	 * that is allocated will be contiguous (sgl_len = 1).
12014 	 */
12015 	if (!pBuffer->enabled) {
12016 		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
12017 		return (DDI_FAILURE);
12018 	}
12019 	bzero(&pBuffer->buffer_data, sizeof (mptsas_dma_alloc_state_t));
12020 	pBuffer->buffer_data.size = buffer_size;
12021 	if (mptsas_dma_alloc(mpt, &pBuffer->buffer_data) != DDI_SUCCESS) {
12022 		mptsas_log(mpt, CE_WARN, "failed to alloc DMA resource for "
12023 		    "diag buffer: size = %d bytes", buffer_size);
12024 		*return_code = MPTSAS_FW_DIAG_ERROR_NO_BUFFER;
12025 		return (DDI_FAILURE);
12026 	}
12027 
12028 	/*
12029 	 * Copy the given info to the diag buffer and post the buffer.
12030 	 */
12031 	pBuffer->buffer_type = buffer_type;
12032 	pBuffer->immediate = FALSE;
12033 	if (buffer_type == MPI2_DIAG_BUF_TYPE_TRACE) {
12034 		for (i = 0; i < (sizeof (pBuffer->product_specific) / 4);
12035 		    i++) {
12036 			pBuffer->product_specific[i] =
12037 			    diag_register->ProductSpecific[i];
12038 		}
12039 	}
12040 	pBuffer->extended_type = extended_type;
12041 	pBuffer->unique_id = unique_id;
12042 	status = mptsas_post_fw_diag_buffer(mpt, pBuffer, return_code);
12043 
12044 	if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
12045 	    DDI_SUCCESS) {
12046 		mptsas_log(mpt, CE_WARN, "Check of DMA handle failed in "
12047 		    "mptsas_diag_register.");
12048 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
12049 		status = DDI_FAILURE;
12050 	}
12051 
12052 	/*
12053 	 * In case there was a failure, free the DMA buffer.
12054 	 */
12055 	if (status == DDI_FAILURE) {
12056 		mptsas_dma_free(&pBuffer->buffer_data);
12057 	}
12058 
12059 	return (status);
12060 }
12061 
12062 static int
12063 mptsas_diag_unregister(mptsas_t *mpt,
12064     mptsas_fw_diag_unregister_t *diag_unregister, uint32_t *return_code)
12065 {
12066 	mptsas_fw_diagnostic_buffer_t	*pBuffer;
12067 	uint8_t				i;
12068 	uint32_t			unique_id;
12069 	int				status;
12070 
12071 	ASSERT(mutex_owned(&mpt->m_mutex));
12072 
12073 	unique_id = diag_unregister->UniqueId;
12074 
12075 	/*
12076 	 * Get the current buffer and look up the unique ID.  The unique ID
12077 	 * should be there.
12078 	 */
12079 	i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
12080 	if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
12081 		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
12082 		return (DDI_FAILURE);
12083 	}
12084 
12085 	pBuffer = &mpt->m_fw_diag_buffer_list[i];
12086 
12087 	/*
12088 	 * Try to release the buffer from FW before freeing it.  If release
12089 	 * fails, don't free the DMA buffer in case FW tries to access it
12090 	 * later.  If buffer is not owned by firmware, can't release it.
12091 	 */
12092 	if (!pBuffer->owned_by_firmware) {
12093 		status = DDI_SUCCESS;
12094 	} else {
12095 		status = mptsas_release_fw_diag_buffer(mpt, pBuffer,
12096 		    return_code, MPTSAS_FW_DIAG_TYPE_UNREGISTER);
12097 	}
12098 
12099 	/*
12100 	 * At this point, return the current status no matter what happens with
12101 	 * the DMA buffer.
12102 	 */
12103 	pBuffer->unique_id = MPTSAS_FW_DIAG_INVALID_UID;
12104 	if (status == DDI_SUCCESS) {
12105 		if (mptsas_check_dma_handle(pBuffer->buffer_data.handle) !=
12106 		    DDI_SUCCESS) {
12107 			mptsas_log(mpt, CE_WARN, "Check of DMA handle failed "
12108 			    "in mptsas_diag_unregister.");
12109 			ddi_fm_service_impact(mpt->m_dip,
12110 			    DDI_SERVICE_UNAFFECTED);
12111 		}
12112 		mptsas_dma_free(&pBuffer->buffer_data);
12113 	}
12114 
12115 	return (status);
12116 }
12117 
12118 static int
12119 mptsas_diag_query(mptsas_t *mpt, mptsas_fw_diag_query_t *diag_query,
12120     uint32_t *return_code)
12121 {
12122 	mptsas_fw_diagnostic_buffer_t	*pBuffer;
12123 	uint8_t				i;
12124 	uint32_t			unique_id;
12125 
12126 	ASSERT(mutex_owned(&mpt->m_mutex));
12127 
12128 	unique_id = diag_query->UniqueId;
12129 
12130 	/*
12131 	 * If ID is valid, query on ID.
12132 	 * If ID is invalid, query on buffer type.
12133 	 */
12134 	if (unique_id == MPTSAS_FW_DIAG_INVALID_UID) {
12135 		i = diag_query->BufferType;
12136 		if (i >= MPI2_DIAG_BUF_TYPE_COUNT) {
12137 			*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
12138 			return (DDI_FAILURE);
12139 		}
12140 	} else {
12141 		i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
12142 		if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
12143 			*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
12144 			return (DDI_FAILURE);
12145 		}
12146 	}
12147 
12148 	/*
12149 	 * Fill query structure with the diag buffer info.
12150 	 */
12151 	pBuffer = &mpt->m_fw_diag_buffer_list[i];
12152 	diag_query->BufferType = pBuffer->buffer_type;
12153 	diag_query->ExtendedType = pBuffer->extended_type;
12154 	if (diag_query->BufferType == MPI2_DIAG_BUF_TYPE_TRACE) {
12155 		for (i = 0; i < (sizeof (diag_query->ProductSpecific) / 4);
12156 		    i++) {
12157 			diag_query->ProductSpecific[i] =
12158 			    pBuffer->product_specific[i];
12159 		}
12160 	}
12161 	diag_query->TotalBufferSize = pBuffer->buffer_data.size;
12162 	diag_query->DriverAddedBufferSize = 0;
12163 	diag_query->UniqueId = pBuffer->unique_id;
12164 	diag_query->ApplicationFlags = 0;
12165 	diag_query->DiagnosticFlags = 0;
12166 
12167 	/*
12168 	 * Set/Clear application flags
12169 	 */
12170 	if (pBuffer->immediate) {
12171 		diag_query->ApplicationFlags &= ~MPTSAS_FW_DIAG_FLAG_APP_OWNED;
12172 	} else {
12173 		diag_query->ApplicationFlags |= MPTSAS_FW_DIAG_FLAG_APP_OWNED;
12174 	}
12175 	if (pBuffer->valid_data || pBuffer->owned_by_firmware) {
12176 		diag_query->ApplicationFlags |=
12177 		    MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
12178 	} else {
12179 		diag_query->ApplicationFlags &=
12180 		    ~MPTSAS_FW_DIAG_FLAG_BUFFER_VALID;
12181 	}
12182 	if (pBuffer->owned_by_firmware) {
12183 		diag_query->ApplicationFlags |=
12184 		    MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
12185 	} else {
12186 		diag_query->ApplicationFlags &=
12187 		    ~MPTSAS_FW_DIAG_FLAG_FW_BUFFER_ACCESS;
12188 	}
12189 
12190 	return (DDI_SUCCESS);
12191 }
12192 
12193 static int
12194 mptsas_diag_read_buffer(mptsas_t *mpt,
12195     mptsas_diag_read_buffer_t *diag_read_buffer, uint8_t *ioctl_buf,
12196     uint32_t *return_code, int ioctl_mode)
12197 {
12198 	mptsas_fw_diagnostic_buffer_t	*pBuffer;
12199 	uint8_t				i, *pData;
12200 	uint32_t			unique_id, byte;
12201 	int				status;
12202 
12203 	ASSERT(mutex_owned(&mpt->m_mutex));
12204 
12205 	unique_id = diag_read_buffer->UniqueId;
12206 
12207 	/*
12208 	 * Get the current buffer and look up the unique ID.  The unique ID
12209 	 * should be there.
12210 	 */
12211 	i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
12212 	if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
12213 		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
12214 		return (DDI_FAILURE);
12215 	}
12216 
12217 	pBuffer = &mpt->m_fw_diag_buffer_list[i];
12218 
12219 	/*
12220 	 * Make sure requested read is within limits
12221 	 */
12222 	if (diag_read_buffer->StartingOffset + diag_read_buffer->BytesToRead >
12223 	    pBuffer->buffer_data.size) {
12224 		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12225 		return (DDI_FAILURE);
12226 	}
12227 
12228 	/*
12229 	 * Copy the requested data from DMA to the diag_read_buffer.  The DMA
12230 	 * buffer that was allocated is one contiguous buffer.
12231 	 */
12232 	pData = (uint8_t *)(pBuffer->buffer_data.memp +
12233 	    diag_read_buffer->StartingOffset);
12234 	(void) ddi_dma_sync(pBuffer->buffer_data.handle, 0, 0,
12235 	    DDI_DMA_SYNC_FORCPU);
12236 	for (byte = 0; byte < diag_read_buffer->BytesToRead; byte++) {
12237 		if (ddi_copyout(pData + byte, ioctl_buf + byte, 1, ioctl_mode)
12238 		    != 0) {
12239 			return (DDI_FAILURE);
12240 		}
12241 	}
12242 	diag_read_buffer->Status = 0;
12243 
12244 	/*
12245 	 * Set or clear the Force Release flag.
12246 	 */
12247 	if (pBuffer->force_release) {
12248 		diag_read_buffer->Flags |= MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
12249 	} else {
12250 		diag_read_buffer->Flags &= ~MPTSAS_FW_DIAG_FLAG_FORCE_RELEASE;
12251 	}
12252 
12253 	/*
12254 	 * If buffer is to be reregistered, make sure it's not already owned by
12255 	 * firmware first.
12256 	 */
12257 	status = DDI_SUCCESS;
12258 	if (!pBuffer->owned_by_firmware) {
12259 		if (diag_read_buffer->Flags & MPTSAS_FW_DIAG_FLAG_REREGISTER) {
12260 			status = mptsas_post_fw_diag_buffer(mpt, pBuffer,
12261 			    return_code);
12262 		}
12263 	}
12264 
12265 	return (status);
12266 }
12267 
12268 static int
12269 mptsas_diag_release(mptsas_t *mpt, mptsas_fw_diag_release_t *diag_release,
12270     uint32_t *return_code)
12271 {
12272 	mptsas_fw_diagnostic_buffer_t	*pBuffer;
12273 	uint8_t				i;
12274 	uint32_t			unique_id;
12275 	int				status;
12276 
12277 	ASSERT(mutex_owned(&mpt->m_mutex));
12278 
12279 	unique_id = diag_release->UniqueId;
12280 
12281 	/*
12282 	 * Get the current buffer and look up the unique ID.  The unique ID
12283 	 * should be there.
12284 	 */
12285 	i = mptsas_get_fw_diag_buffer_number(mpt, unique_id);
12286 	if (i == MPTSAS_FW_DIAGNOSTIC_UID_NOT_FOUND) {
12287 		*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_UID;
12288 		return (DDI_FAILURE);
12289 	}
12290 
12291 	pBuffer = &mpt->m_fw_diag_buffer_list[i];
12292 
12293 	/*
12294 	 * If buffer is not owned by firmware, it's already been released.
12295 	 */
12296 	if (!pBuffer->owned_by_firmware) {
12297 		*return_code = MPTSAS_FW_DIAG_ERROR_ALREADY_RELEASED;
12298 		return (DDI_FAILURE);
12299 	}
12300 
12301 	/*
12302 	 * Release the buffer.
12303 	 */
12304 	status = mptsas_release_fw_diag_buffer(mpt, pBuffer, return_code,
12305 	    MPTSAS_FW_DIAG_TYPE_RELEASE);
12306 	return (status);
12307 }
12308 
12309 static int
12310 mptsas_do_diag_action(mptsas_t *mpt, uint32_t action, uint8_t *diag_action,
12311     uint32_t length, uint32_t *return_code, int ioctl_mode)
12312 {
12313 	mptsas_fw_diag_register_t	diag_register;
12314 	mptsas_fw_diag_unregister_t	diag_unregister;
12315 	mptsas_fw_diag_query_t		diag_query;
12316 	mptsas_diag_read_buffer_t	diag_read_buffer;
12317 	mptsas_fw_diag_release_t	diag_release;
12318 	int				status = DDI_SUCCESS;
12319 	uint32_t			original_return_code, read_buf_len;
12320 
12321 	ASSERT(mutex_owned(&mpt->m_mutex));
12322 
12323 	original_return_code = *return_code;
12324 	*return_code = MPTSAS_FW_DIAG_ERROR_SUCCESS;
12325 
12326 	switch (action) {
12327 		case MPTSAS_FW_DIAG_TYPE_REGISTER:
12328 			if (!length) {
12329 				*return_code =
12330 				    MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12331 				status = DDI_FAILURE;
12332 				break;
12333 			}
12334 			if (ddi_copyin(diag_action, &diag_register,
12335 			    sizeof (diag_register), ioctl_mode) != 0) {
12336 				return (DDI_FAILURE);
12337 			}
12338 			status = mptsas_diag_register(mpt, &diag_register,
12339 			    return_code);
12340 			break;
12341 
12342 		case MPTSAS_FW_DIAG_TYPE_UNREGISTER:
12343 			if (length < sizeof (diag_unregister)) {
12344 				*return_code =
12345 				    MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12346 				status = DDI_FAILURE;
12347 				break;
12348 			}
12349 			if (ddi_copyin(diag_action, &diag_unregister,
12350 			    sizeof (diag_unregister), ioctl_mode) != 0) {
12351 				return (DDI_FAILURE);
12352 			}
12353 			status = mptsas_diag_unregister(mpt, &diag_unregister,
12354 			    return_code);
12355 			break;
12356 
12357 		case MPTSAS_FW_DIAG_TYPE_QUERY:
12358 			if (length < sizeof (diag_query)) {
12359 				*return_code =
12360 				    MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12361 				status = DDI_FAILURE;
12362 				break;
12363 			}
12364 			if (ddi_copyin(diag_action, &diag_query,
12365 			    sizeof (diag_query), ioctl_mode) != 0) {
12366 				return (DDI_FAILURE);
12367 			}
12368 			status = mptsas_diag_query(mpt, &diag_query,
12369 			    return_code);
12370 			if (status == DDI_SUCCESS) {
12371 				if (ddi_copyout(&diag_query, diag_action,
12372 				    sizeof (diag_query), ioctl_mode) != 0) {
12373 					return (DDI_FAILURE);
12374 				}
12375 			}
12376 			break;
12377 
12378 		case MPTSAS_FW_DIAG_TYPE_READ_BUFFER:
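			/*
			 * A note on the "- 4" below (an inference, not stated
			 * elsewhere in this file): it appears to match the
			 * trailing DataBuffer placeholder of
			 * mptsas_diag_read_buffer_t, i.e. the same bytes
			 * counted by sizeof (diag_read_buffer.DataBuffer)
			 * further down.  Only the fixed header is copied in
			 * and out here; the caller's data area begins right
			 * after it.
			 */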
12379 			if (ddi_copyin(diag_action, &diag_read_buffer,
12380 			    sizeof (diag_read_buffer) - 4, ioctl_mode) != 0) {
12381 				return (DDI_FAILURE);
12382 			}
12383 			read_buf_len = sizeof (diag_read_buffer) -
12384 			    sizeof (diag_read_buffer.DataBuffer) +
12385 			    diag_read_buffer.BytesToRead;
12386 			if (length < read_buf_len) {
12387 				*return_code =
12388 				    MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12389 				status = DDI_FAILURE;
12390 				break;
12391 			}
12392 			status = mptsas_diag_read_buffer(mpt,
12393 			    &diag_read_buffer, diag_action +
12394 			    sizeof (diag_read_buffer) - 4, return_code,
12395 			    ioctl_mode);
12396 			if (status == DDI_SUCCESS) {
12397 				if (ddi_copyout(&diag_read_buffer, diag_action,
12398 				    sizeof (diag_read_buffer) - 4, ioctl_mode)
12399 				    != 0) {
12400 					return (DDI_FAILURE);
12401 				}
12402 			}
12403 			break;
12404 
12405 		case MPTSAS_FW_DIAG_TYPE_RELEASE:
12406 			if (length < sizeof (diag_release)) {
12407 				*return_code =
12408 				    MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12409 				status = DDI_FAILURE;
12410 				break;
12411 			}
12412 			if (ddi_copyin(diag_action, &diag_release,
12413 			    sizeof (diag_release), ioctl_mode) != 0) {
12414 				return (DDI_FAILURE);
12415 			}
12416 			status = mptsas_diag_release(mpt, &diag_release,
12417 			    return_code);
12418 			break;
12419 
12420 		default:
12421 			*return_code = MPTSAS_FW_DIAG_ERROR_INVALID_PARAMETER;
12422 			status = DDI_FAILURE;
12423 			break;
12424 	}
12425 
12426 	if ((status == DDI_FAILURE) &&
12427 	    (original_return_code == MPTSAS_FW_DIAG_NEW) &&
12428 	    (*return_code != MPTSAS_FW_DIAG_ERROR_SUCCESS)) {
12429 		status = DDI_SUCCESS;
12430 	}
12431 
12432 	return (status);
12433 }
12434 
12435 static int
12436 mptsas_diag_action(mptsas_t *mpt, mptsas_diag_action_t *user_data, int mode)
12437 {
12438 	int			status;
12439 	mptsas_diag_action_t	driver_data;
12440 
12441 	ASSERT(mutex_owned(&mpt->m_mutex));
12442 
12443 	/*
12444 	 * Copy the user data to a driver data buffer.
12445 	 */
12446 	if (ddi_copyin(user_data, &driver_data, sizeof (mptsas_diag_action_t),
12447 	    mode) == 0) {
12448 		/*
12449 		 * Send diag action request if Action is valid
12450 		 */
12451 		if (driver_data.Action == MPTSAS_FW_DIAG_TYPE_REGISTER ||
12452 		    driver_data.Action == MPTSAS_FW_DIAG_TYPE_UNREGISTER ||
12453 		    driver_data.Action == MPTSAS_FW_DIAG_TYPE_QUERY ||
12454 		    driver_data.Action == MPTSAS_FW_DIAG_TYPE_READ_BUFFER ||
12455 		    driver_data.Action == MPTSAS_FW_DIAG_TYPE_RELEASE) {
12456 			status = mptsas_do_diag_action(mpt, driver_data.Action,
12457 			    (void *)(uintptr_t)driver_data.PtrDiagAction,
12458 			    driver_data.Length, &driver_data.ReturnCode,
12459 			    mode);
12460 			if (status == DDI_SUCCESS) {
12461 				if (ddi_copyout(&driver_data.ReturnCode,
12462 				    &user_data->ReturnCode,
12463 				    sizeof (user_data->ReturnCode), mode)
12464 				    != 0) {
12465 					status = EFAULT;
12466 				} else {
12467 					status = 0;
12468 				}
12469 			} else {
12470 				status = EIO;
12471 			}
12472 		} else {
12473 			status = EINVAL;
12474 		}
12475 	} else {
12476 		status = EFAULT;
12477 	}
12478 
12479 	return (status);
12480 }
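
/*
 * Illustrative userland sketch (not part of the driver) of how the
 * MPTIOCTL_DIAG_ACTION ioctl above is typically driven.  Structure and
 * constant names are the ones used in this file; everything else (file
 * descriptor, register payload, error handling) is assumed.
 *
 *	mptsas_diag_action_t act;
 *	mptsas_fw_diag_register_t reg;		(filled in by the caller)
 *
 *	act.Action = MPTSAS_FW_DIAG_TYPE_REGISTER;
 *	act.PtrDiagAction = (uintptr_t)&reg;
 *	act.Length = sizeof (reg);
 *	act.ReturnCode = MPTSAS_FW_DIAG_NEW;
 *	(void) ioctl(fd, MPTIOCTL_DIAG_ACTION, &act);
 *
 * A QUERY, READ_BUFFER or RELEASE action follows the same pattern with the
 * corresponding payload structure.
 */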
12481 
12482 /*
12483  * This routine handles the "event query" ioctl.
12484  */
12485 static int
12486 mptsas_event_query(mptsas_t *mpt, mptsas_event_query_t *data, int mode,
12487     int *rval)
12488 {
12489 	int			status;
12490 	mptsas_event_query_t	driverdata;
12491 	uint8_t			i;
12492 
12493 	driverdata.Entries = MPTSAS_EVENT_QUEUE_SIZE;
12494 
12495 	mutex_enter(&mpt->m_mutex);
12496 	for (i = 0; i < 4; i++) {
12497 		driverdata.Types[i] = mpt->m_event_mask[i];
12498 	}
12499 	mutex_exit(&mpt->m_mutex);
12500 
12501 	if (ddi_copyout(&driverdata, data, sizeof (driverdata), mode) != 0) {
12502 		status = EFAULT;
12503 	} else {
12504 		*rval = MPTIOCTL_STATUS_GOOD;
12505 		status = 0;
12506 	}
12507 
12508 	return (status);
12509 }
12510 
12511 /*
12512  * This routine handles the "event enable" ioctl.
12513  */
12514 static int
12515 mptsas_event_enable(mptsas_t *mpt, mptsas_event_enable_t *data, int mode,
12516     int *rval)
12517 {
12518 	int			status;
12519 	mptsas_event_enable_t	driverdata;
12520 	uint8_t			i;
12521 
12522 	if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
12523 		mutex_enter(&mpt->m_mutex);
12524 		for (i = 0; i < 4; i++) {
12525 			mpt->m_event_mask[i] = driverdata.Types[i];
12526 		}
12527 		mutex_exit(&mpt->m_mutex);
12528 
12529 		*rval = MPTIOCTL_STATUS_GOOD;
12530 		status = 0;
12531 	} else {
12532 		status = EFAULT;
12533 	}
12534 	return (status);
12535 }
12536 
12537 /*
12538  * This routine handles the "event report" ioctl.
12539  */
12540 static int
12541 mptsas_event_report(mptsas_t *mpt, mptsas_event_report_t *data, int mode,
12542     int *rval)
12543 {
12544 	int			status;
12545 	mptsas_event_report_t	driverdata;
12546 
12547 	mutex_enter(&mpt->m_mutex);
12548 
12549 	if (ddi_copyin(&data->Size, &driverdata.Size, sizeof (driverdata.Size),
12550 	    mode) == 0) {
12551 		if (driverdata.Size >= sizeof (mpt->m_events)) {
12552 			if (ddi_copyout(mpt->m_events, data->Events,
12553 			    sizeof (mpt->m_events), mode) != 0) {
12554 				status = EFAULT;
12555 			} else {
12556 				if (driverdata.Size > sizeof (mpt->m_events)) {
12557 					driverdata.Size =
12558 					    sizeof (mpt->m_events);
12559 					if (ddi_copyout(&driverdata.Size,
12560 					    &data->Size,
12561 					    sizeof (driverdata.Size),
12562 					    mode) != 0) {
12563 						status = EFAULT;
12564 					} else {
12565 						*rval = MPTIOCTL_STATUS_GOOD;
12566 						status = 0;
12567 					}
12568 				} else {
12569 					*rval = MPTIOCTL_STATUS_GOOD;
12570 					status = 0;
12571 				}
12572 			}
12573 		} else {
12574 			*rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
12575 			status = 0;
12576 		}
12577 	} else {
12578 		status = EFAULT;
12579 	}
12580 
12581 	mutex_exit(&mpt->m_mutex);
12582 	return (status);
12583 }
12584 
12585 static void
12586 mptsas_lookup_pci_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
12587 {
12588 	int	*reg_data;
12589 	uint_t	reglen;
12590 
12591 	/*
12592 	 * Look up the 'reg' property and extract the data we need.
12593 	 */
12594 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
12595 	    DDI_PROP_DONTPASS, "reg", &reg_data, &reglen) ==
12596 	    DDI_PROP_SUCCESS) {
12597 		/*
12598 		 * Extract the PCI data from the 'reg' property first DWORD.
12599 		 * The entry looks like the following:
12600 		 * First DWORD:
12601 		 * Bits 0 - 7 8-bit Register number
12602 		 * Bits 8 - 10 3-bit Function number
12603 		 * Bits 11 - 15 5-bit Device number
12604 		 * Bits 16 - 23 8-bit Bus number
12605 		 * Bits 24 - 25 2-bit Address Space type identifier
12606 		 *
12607 		 */
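		/*
		 * For example (an illustrative value only): reg[0] ==
		 * 0x00021000 decodes to bus 0x02, device 0x02, function 0x00.
		 */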
12608 		adapter_data->PciInformation.u.bits.BusNumber =
12609 		    (reg_data[0] & 0x00FF0000) >> 16;
12610 		adapter_data->PciInformation.u.bits.DeviceNumber =
12611 		    (reg_data[0] & 0x0000F800) >> 11;
12612 		adapter_data->PciInformation.u.bits.FunctionNumber =
12613 		    (reg_data[0] & 0x00000700) >> 8;
12614 		ddi_prop_free((void *)reg_data);
12615 	} else {
12616 		/*
12617 		 * If we can't determine the PCI data then we fill in FF's for
12618 		 * the data to indicate this.
12619 		 */
12620 		adapter_data->PCIDeviceHwId = 0xFFFFFFFF;
12621 		adapter_data->MpiPortNumber = 0xFFFFFFFF;
12622 		adapter_data->PciInformation.u.AsDWORD = 0xFFFFFFFF;
12623 	}
12624 
12625 	/*
12626 	 * The firmware version is saved in mpt->m_fwversion.
12627 	 */
12628 	adapter_data->MpiFirmwareVersion = mpt->m_fwversion;
12629 }
12630 
12631 static void
12632 mptsas_read_adapter_data(mptsas_t *mpt, mptsas_adapter_data_t *adapter_data)
12633 {
12634 	char	*driver_verstr = MPTSAS_MOD_STRING;
12635 
12636 	mptsas_lookup_pci_data(mpt, adapter_data);
12637 	adapter_data->AdapterType = mpt->m_MPI25 ?
12638 	    MPTIOCTL_ADAPTER_TYPE_SAS3 :
12639 	    MPTIOCTL_ADAPTER_TYPE_SAS2;
12640 	adapter_data->PCIDeviceHwId = (uint32_t)mpt->m_devid;
12641 	adapter_data->PCIDeviceHwRev = (uint32_t)mpt->m_revid;
12642 	adapter_data->SubSystemId = (uint32_t)mpt->m_ssid;
12643 	adapter_data->SubsystemVendorId = (uint32_t)mpt->m_svid;
12644 	(void) strcpy((char *)&adapter_data->DriverVersion[0], driver_verstr);
12645 	adapter_data->BiosVersion = 0;
12646 	(void) mptsas_get_bios_page3(mpt, &adapter_data->BiosVersion);
12647 }
12648 
12649 static void
12650 mptsas_read_pci_info(mptsas_t *mpt, mptsas_pci_info_t *pci_info)
12651 {
12652 	int	*reg_data, i;
12653 	uint_t	reglen;
12654 
12655 	/*
12656 	 * Look up the 'reg' property and extract the data we need.
12657 	 */
12658 	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, mpt->m_dip,
12659 	    DDI_PROP_DONTPASS, "reg", &reg_data, &reglen) ==
12660 	    DDI_PROP_SUCCESS) {
12661 		/*
12662 		 * Extract the PCI data from the 'reg' property first DWORD.
12663 		 * The entry looks like the following:
12664 		 * First DWORD:
12665 		 * Bits 8 - 10 3-bit Function number
12666 		 * Bits 11 - 15 5-bit Device number
12667 		 * Bits 16 - 23 8-bit Bus number
12668 		 */
12669 		pci_info->BusNumber = (reg_data[0] & 0x00FF0000) >> 16;
12670 		pci_info->DeviceNumber = (reg_data[0] & 0x0000F800) >> 11;
12671 		pci_info->FunctionNumber = (reg_data[0] & 0x00000700) >> 8;
12672 		ddi_prop_free((void *)reg_data);
12673 	} else {
12674 		/*
12675 		 * If we can't determine the PCI info then we fill in FF's for
12676 		 * the data to indicate this.
12677 		 */
12678 		pci_info->BusNumber = 0xFFFFFFFF;
12679 		pci_info->DeviceNumber = 0xFF;
12680 		pci_info->FunctionNumber = 0xFF;
12681 	}
12682 
12683 	/*
12684 	 * Now get the interrupt vector and the pci header.  The vector can
12685 	 * only be 0 right now.  The header is the first 256 bytes of config
12686 	 * space.
12687 	 */
12688 	pci_info->InterruptVector = 0;
12689 	for (i = 0; i < sizeof (pci_info->PciHeader); i++) {
12690 		pci_info->PciHeader[i] = pci_config_get8(mpt->m_config_handle,
12691 		    i);
12692 	}
12693 }
12694 
12695 static int
12696 mptsas_reg_access(mptsas_t *mpt, mptsas_reg_access_t *data, int mode)
12697 {
12698 	int			status = 0;
12699 	mptsas_reg_access_t	driverdata;
12700 
12701 	mutex_enter(&mpt->m_mutex);
12702 	if (ddi_copyin(data, &driverdata, sizeof (driverdata), mode) == 0) {
12703 		switch (driverdata.Command) {
12704 			/*
12705 			 * IO access is not supported.
12706 			 */
12707 			case REG_IO_READ:
12708 			case REG_IO_WRITE:
12709 				mptsas_log(mpt, CE_WARN, "IO access is not "
12710 				    "supported.  Use memory access.");
12711 				status = EINVAL;
12712 				break;
12713 
12714 			case REG_MEM_READ:
12715 				driverdata.RegData = mptsas_hirrd(mpt,
12716 				    (uint32_t *)(void *)mpt->m_reg +
12717 				    driverdata.RegOffset);
12718 				if (ddi_copyout(&driverdata.RegData,
12719 				    &data->RegData,
12720 				    sizeof (driverdata.RegData), mode) != 0) {
12721 					mptsas_log(mpt, CE_WARN, "Register "
12722 					    "Read Failed");
12723 					status = EFAULT;
12724 				}
12725 				break;
12726 
12727 			case REG_MEM_WRITE:
12728 				ddi_put32(mpt->m_datap,
12729 				    (uint32_t *)(void *)mpt->m_reg +
12730 				    driverdata.RegOffset,
12731 				    driverdata.RegData);
12732 				break;
12733 
12734 			default:
12735 				status = EINVAL;
12736 				break;
12737 		}
12738 	} else {
12739 		status = EFAULT;
12740 	}
12741 
12742 	mutex_exit(&mpt->m_mutex);
12743 	return (status);
12744 }
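
/*
 * Illustrative userland sketch (not part of the driver) of the
 * MPTIOCTL_REG_ACCESS ioctl handled above.  Field and constant names come
 * from this file; the rest is assumed.  RegOffset is in 32-bit words from
 * the start of the register space, per the pointer arithmetic above.
 *
 *	mptsas_reg_access_t ra;
 *
 *	ra.Command = REG_MEM_READ;
 *	ra.RegOffset = offset;
 *	(void) ioctl(fd, MPTIOCTL_REG_ACCESS, &ra);
 *	value = ra.RegData;
 */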
12745 
12746 static int
12747 led_control(mptsas_t *mpt, intptr_t data, int mode)
12748 {
12749 	int ret = 0;
12750 	mptsas_led_control_t lc;
12751 	mptsas_enclosure_t *mep;
12752 	uint16_t slotidx;
12753 
12754 	if (ddi_copyin((void *)data, &lc, sizeof (lc), mode) != 0) {
12755 		return (EFAULT);
12756 	}
12757 
12758 	if ((lc.Command != MPTSAS_LEDCTL_FLAG_SET &&
12759 	    lc.Command != MPTSAS_LEDCTL_FLAG_GET) ||
12760 	    lc.Led < MPTSAS_LEDCTL_LED_MIN ||
12761 	    lc.Led > MPTSAS_LEDCTL_LED_MAX ||
12762 	    (lc.Command == MPTSAS_LEDCTL_FLAG_SET && lc.LedStatus != 0 &&
12763 	    lc.LedStatus != 1)) {
12764 		return (EINVAL);
12765 	}
12766 
12767 	if ((lc.Command == MPTSAS_LEDCTL_FLAG_SET && (mode & FWRITE) == 0) ||
12768 	    (lc.Command == MPTSAS_LEDCTL_FLAG_GET && (mode & FREAD) == 0))
12769 		return (EACCES);
12770 
12771 	/* Locate the required enclosure */
12772 	mutex_enter(&mpt->m_mutex);
12773 	mep = mptsas_enc_lookup(mpt, lc.Enclosure);
12774 	if (mep == NULL) {
12775 		mutex_exit(&mpt->m_mutex);
12776 		return (ENOENT);
12777 	}
12778 
12779 	if (lc.Slot < mep->me_fslot) {
12780 		mutex_exit(&mpt->m_mutex);
12781 		return (ENOENT);
12782 	}
12783 
12784 	/*
12785 	 * Slots on the enclosure are maintained in an array where me_fslot
12786 	 * is entry zero.  We normalize the requested slot.
12787 	 */
12788 	slotidx = lc.Slot - mep->me_fslot;
12789 	if (slotidx >= mep->me_nslots) {
12790 		mutex_exit(&mpt->m_mutex);
12791 		return (ENOENT);
12792 	}
12793 
12794 	if (lc.Command == MPTSAS_LEDCTL_FLAG_SET) {
12795 		/* Update our internal LED state. */
12796 		mep->me_slotleds[slotidx] &= ~(1 << (lc.Led - 1));
12797 		mep->me_slotleds[slotidx] |= lc.LedStatus << (lc.Led - 1);
12798 
12799 		/* Flush it to the controller. */
12800 		ret = mptsas_flush_led_status(mpt, mep, slotidx);
12801 		mutex_exit(&mpt->m_mutex);
12802 		return (ret);
12803 	}
12804 
12805 	/* Return our internal LED state. */
12806 	lc.LedStatus = (mep->me_slotleds[slotidx] >> (lc.Led - 1)) & 1;
12807 	mutex_exit(&mpt->m_mutex);
12808 
12809 	if (ddi_copyout(&lc, (void *)data, sizeof (lc), mode) != 0) {
12810 		return (EFAULT);
12811 	}
12812 
12813 	return (0);
12814 }
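
/*
 * Illustrative userland sketch (not part of the driver) of the
 * MPTIOCTL_LED_CONTROL ioctl handled above.  Field and constant names come
 * from this file; the rest is assumed.
 *
 *	mptsas_led_control_t lc;
 *
 *	lc.Command = MPTSAS_LEDCTL_FLAG_SET;
 *	lc.Enclosure = enclosure;	(enclosure handle)
 *	lc.Slot = slot;			(physical slot number)
 *	lc.Led = led;			(MPTSAS_LEDCTL_LED_MIN..MAX)
 *	lc.LedStatus = 1;		(1 = on, 0 = off)
 *	(void) ioctl(fd, MPTIOCTL_LED_CONTROL, &lc);
 */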
12815 
12816 static int
12817 get_disk_info(mptsas_t *mpt, intptr_t data, int mode)
12818 {
12819 	uint16_t i = 0;
12820 	uint16_t count = 0;
12821 	int ret = 0;
12822 	mptsas_target_t *ptgt;
12823 	mptsas_disk_info_t *di;
12824 	STRUCT_DECL(mptsas_get_disk_info, gdi);
12825 
12826 	if ((mode & FREAD) == 0)
12827 		return (EACCES);
12828 
12829 	STRUCT_INIT(gdi, get_udatamodel());
12830 
12831 	if (ddi_copyin((void *)data, STRUCT_BUF(gdi), STRUCT_SIZE(gdi),
12832 	    mode) != 0) {
12833 		return (EFAULT);
12834 	}
12835 
12836 	/* Find out how many targets there are. */
12837 	mutex_enter(&mpt->m_mutex);
12838 	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
12839 	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
12840 		count++;
12841 	}
12842 	mutex_exit(&mpt->m_mutex);
12843 
12844 	/*
12845 	 * If we haven't been asked to copy out information on each target,
12846 	 * then just return the count.
12847 	 */
12848 	STRUCT_FSET(gdi, DiskCount, count);
12849 	if (STRUCT_FGETP(gdi, PtrDiskInfoArray) == NULL)
12850 		goto copy_out;
12851 
12852 	/*
12853 	 * If we haven't been given a large enough buffer to copy out into,
12854 	 * let the caller know.
12855 	 */
12856 	if (STRUCT_FGET(gdi, DiskInfoArraySize) <
12857 	    count * sizeof (mptsas_disk_info_t)) {
12858 		ret = ENOSPC;
12859 		goto copy_out;
12860 	}
12861 
12862 	di = kmem_zalloc(count * sizeof (mptsas_disk_info_t), KM_SLEEP);
12863 
12864 	mutex_enter(&mpt->m_mutex);
12865 	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
12866 	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
12867 		if (i >= count) {
12868 			/*
12869 			 * The number of targets changed while we weren't
12870 			 * looking, so give up.
12871 			 */
12872 			refhash_rele(mpt->m_targets, ptgt);
12873 			mutex_exit(&mpt->m_mutex);
12874 			kmem_free(di, count * sizeof (mptsas_disk_info_t));
12875 			return (EAGAIN);
12876 		}
12877 		di[i].Instance = mpt->m_instance;
12878 		di[i].Enclosure = ptgt->m_enclosure;
12879 		di[i].Slot = ptgt->m_slot_num;
12880 		di[i].SasAddress = ptgt->m_addr.mta_wwn;
12881 		i++;
12882 	}
12883 	mutex_exit(&mpt->m_mutex);
12884 	STRUCT_FSET(gdi, DiskCount, i);
12885 
12886 	/* Copy out the disk information to the caller. */
12887 	if (ddi_copyout((void *)di, STRUCT_FGETP(gdi, PtrDiskInfoArray),
12888 	    i * sizeof (mptsas_disk_info_t), mode) != 0) {
12889 		ret = EFAULT;
12890 	}
12891 
12892 	kmem_free(di, count * sizeof (mptsas_disk_info_t));
12893 
12894 copy_out:
12895 	if (ddi_copyout(STRUCT_BUF(gdi), (void *)data, STRUCT_SIZE(gdi),
12896 	    mode) != 0) {
12897 		ret = EFAULT;
12898 	}
12899 
12900 	return (ret);
12901 }
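
/*
 * Illustrative userland sketch (not part of the driver) of the two-pass
 * pattern get_disk_info() above supports: first call with a NULL
 * PtrDiskInfoArray to learn the count, then allocate and call again.
 * Field names come from this file; the rest is assumed.
 *
 *	struct mptsas_get_disk_info gdi;
 *	mptsas_disk_info_t *di;
 *
 *	bzero(&gdi, sizeof (gdi));
 *	(void) ioctl(fd, MPTIOCTL_GET_DISK_INFO, &gdi);
 *	di = calloc(gdi.DiskCount, sizeof (mptsas_disk_info_t));
 *	gdi.PtrDiskInfoArray = di;
 *	gdi.DiskInfoArraySize = gdi.DiskCount * sizeof (mptsas_disk_info_t);
 *	(void) ioctl(fd, MPTIOCTL_GET_DISK_INFO, &gdi);
 */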
12902 
12903 static int
12904 mptsas_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
12905     int *rval)
12906 {
12907 	int			status = 0;
12908 	mptsas_t		*mpt;
12909 	mptsas_update_flash_t	flashdata;
12910 	mptsas_pass_thru_t	passthru_data;
12911 	mptsas_adapter_data_t   adapter_data;
12912 	mptsas_pci_info_t	pci_info;
12913 	int			copylen;
12914 
12915 	int			iport_flag = 0;
12916 	dev_info_t		*dip = NULL;
12917 	mptsas_phymask_t	phymask = 0;
12918 	struct devctl_iocdata	*dcp = NULL;
12919 	char			*addr = NULL;
12920 	mptsas_target_t		*ptgt = NULL;
12921 
12922 	*rval = MPTIOCTL_STATUS_GOOD;
12923 	if (secpolicy_sys_config(credp, B_FALSE) != 0) {
12924 		return (EPERM);
12925 	}
12926 
12927 	mpt = ddi_get_soft_state(mptsas_state, MINOR2INST(getminor(dev)));
12928 	if (mpt == NULL) {
12929 		/*
12930 		 * Called from an iport node; get the soft state via the dip.
12931 		 */
12932 		iport_flag = 1;
12933 		dip = mptsas_get_dip_from_dev(dev, &phymask);
12934 		if (dip == NULL) {
12935 			return (ENXIO);
12936 		}
12937 		mpt = DIP2MPT(dip);
12938 	}
12939 	/* Make sure power level is D0 before accessing registers */
12940 	mutex_enter(&mpt->m_mutex);
12941 	if (mpt->m_options & MPTSAS_OPT_PM) {
12942 		(void) pm_busy_component(mpt->m_dip, 0);
12943 		if (mpt->m_power_level != PM_LEVEL_D0) {
12944 			mutex_exit(&mpt->m_mutex);
12945 			if (pm_raise_power(mpt->m_dip, 0, PM_LEVEL_D0) !=
12946 			    DDI_SUCCESS) {
12947 				mptsas_log(mpt, CE_WARN,
12948 				    "mptsas%d: mptsas_ioctl: Raise power "
12949 				    "request failed.", mpt->m_instance);
12950 				(void) pm_idle_component(mpt->m_dip, 0);
12951 				return (ENXIO);
12952 			}
12953 		} else {
12954 			mutex_exit(&mpt->m_mutex);
12955 		}
12956 	} else {
12957 		mutex_exit(&mpt->m_mutex);
12958 	}
12959 
12960 	if (iport_flag) {
12961 		status = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval);
12962 		if (status != 0) {
12963 			goto out;
12964 		}
12965 		/*
12966 		 * The following code controls the OK2RM LED; it doesn't
12967 		 * affect the ioctl return status.
12968 		 */
12969 		if ((cmd == DEVCTL_DEVICE_ONLINE) ||
12970 		    (cmd == DEVCTL_DEVICE_OFFLINE)) {
12971 			if (ndi_dc_allochdl((void *)data, &dcp) !=
12972 			    NDI_SUCCESS) {
12973 				goto out;
12974 			}
12975 			addr = ndi_dc_getaddr(dcp);
12976 			ptgt = mptsas_addr_to_ptgt(mpt, addr, phymask);
12977 			if (ptgt == NULL) {
12978 				NDBG14(("mptsas_ioctl led control: tgt %s not "
12979 				    "found", addr));
12980 				ndi_dc_freehdl(dcp);
12981 				goto out;
12982 			}
12983 			ndi_dc_freehdl(dcp);
12984 		}
12985 		goto out;
12986 	}
12987 	switch (cmd) {
12988 		case MPTIOCTL_GET_DISK_INFO:
12989 			status = get_disk_info(mpt, data, mode);
12990 			break;
12991 		case MPTIOCTL_LED_CONTROL:
12992 			status = led_control(mpt, data, mode);
12993 			break;
12994 		case MPTIOCTL_UPDATE_FLASH:
12995 			if (ddi_copyin((void *)data, &flashdata,
12996 				sizeof (struct mptsas_update_flash), mode)) {
12997 				status = EFAULT;
12998 				break;
12999 			}
13000 
13001 			mutex_enter(&mpt->m_mutex);
13002 			if (mptsas_update_flash(mpt,
13003 			    (caddr_t)(long)flashdata.PtrBuffer,
13004 			    flashdata.ImageSize, flashdata.ImageType, mode)) {
13005 				status = EFAULT;
13006 			}
13007 
13008 			/*
13009 			 * Reset the chip so it starts using the new
13010 			 * firmware.  Reset even if the update failed.
13011 			 */
13012 			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
13013 			if (mptsas_restart_ioc(mpt) == DDI_FAILURE) {
13014 				status = EFAULT;
13015 			}
13016 			mutex_exit(&mpt->m_mutex);
13017 			break;
13018 		case MPTIOCTL_PASS_THRU:
13019 			/*
13020 			 * The user has requested to pass a command through to
13021 			 * be executed by the MPT firmware.  Call our routine
13022 			 * which does this.  Only allow one passthru IOCTL at
13023 			 * a time.  Other threads will block on
13024 			 * m_passthru_mutex, which is an adaptive mutex.
13025 			 */
13026 			if (ddi_copyin((void *)data, &passthru_data,
13027 			    sizeof (mptsas_pass_thru_t), mode)) {
13028 				status = EFAULT;
13029 				break;
13030 			}
13031 			mutex_enter(&mpt->m_passthru_mutex);
13032 			mutex_enter(&mpt->m_mutex);
13033 			status = mptsas_pass_thru(mpt, &passthru_data, mode);
13034 			mutex_exit(&mpt->m_mutex);
13035 			mutex_exit(&mpt->m_passthru_mutex);
13036 
13037 			break;
13038 		case MPTIOCTL_GET_ADAPTER_DATA:
13039 			/*
13040 			 * The user has requested to read adapter data.  Call
13041 			 * our routine which does this.
13042 			 */
13043 			bzero(&adapter_data, sizeof (mptsas_adapter_data_t));
13044 			if (ddi_copyin((void *)data, (void *)&adapter_data,
13045 			    sizeof (mptsas_adapter_data_t), mode)) {
13046 				status = EFAULT;
13047 				break;
13048 			}
13049 			if (adapter_data.StructureLength >=
13050 			    sizeof (mptsas_adapter_data_t)) {
13051 				adapter_data.StructureLength = (uint32_t)
13052 				    sizeof (mptsas_adapter_data_t);
13053 				copylen = sizeof (mptsas_adapter_data_t);
13054 				mutex_enter(&mpt->m_mutex);
13055 				mptsas_read_adapter_data(mpt, &adapter_data);
13056 				mutex_exit(&mpt->m_mutex);
13057 			} else {
13058 				adapter_data.StructureLength = (uint32_t)
13059 				    sizeof (mptsas_adapter_data_t);
13060 				copylen = sizeof (adapter_data.StructureLength);
13061 				*rval = MPTIOCTL_STATUS_LEN_TOO_SHORT;
13062 			}
13063 			if (ddi_copyout((void *)(&adapter_data), (void *)data,
13064 			    copylen, mode) != 0) {
13065 				status = EFAULT;
13066 			}
13067 			break;
13068 		case MPTIOCTL_GET_PCI_INFO:
13069 			/*
13070 			 * The user has requested to read pci info.  Call
13071 			 * our routine which does this.
13072 			 */
13073 			bzero(&pci_info, sizeof (mptsas_pci_info_t));
13074 			mutex_enter(&mpt->m_mutex);
13075 			mptsas_read_pci_info(mpt, &pci_info);
13076 			mutex_exit(&mpt->m_mutex);
13077 			if (ddi_copyout((void *)(&pci_info), (void *)data,
13078 			    sizeof (mptsas_pci_info_t), mode) != 0) {
13079 				status = EFAULT;
13080 			}
13081 			break;
13082 		case MPTIOCTL_RESET_ADAPTER:
13083 			mutex_enter(&mpt->m_mutex);
13084 			mpt->m_softstate &= ~MPTSAS_SS_MSG_UNIT_RESET;
13085 			if ((mptsas_restart_ioc(mpt)) == DDI_FAILURE) {
13086 				mptsas_log(mpt, CE_WARN, "reset adapter IOCTL "
13087 				    "failed");
13088 				status = EFAULT;
13089 			}
13090 			mutex_exit(&mpt->m_mutex);
13091 			break;
13092 		case MPTIOCTL_DIAG_ACTION:
13093 			/*
13094 			 * The user has done a diag buffer action.  Call our
13095 			 * routine which does this.  Only allow one diag action
13096 			 * at a time.
13097 			 */
13098 			mutex_enter(&mpt->m_mutex);
13099 			if (mpt->m_diag_action_in_progress) {
13100 				mutex_exit(&mpt->m_mutex);
13101 				return (EBUSY);
13102 			}
13103 			mpt->m_diag_action_in_progress = 1;
13104 			status = mptsas_diag_action(mpt,
13105 			    (mptsas_diag_action_t *)data, mode);
13106 			mpt->m_diag_action_in_progress = 0;
13107 			mutex_exit(&mpt->m_mutex);
13108 			break;
13109 		case MPTIOCTL_EVENT_QUERY:
13110 			/*
13111 			 * The user has done an event query. Call our routine
13112 			 * which does this.
13113 			 */
13114 			status = mptsas_event_query(mpt,
13115 			    (mptsas_event_query_t *)data, mode, rval);
13116 			break;
13117 		case MPTIOCTL_EVENT_ENABLE:
13118 			/*
13119 			 * The user has done an event enable. Call our routine
13120 			 * which does this.
13121 			 */
13122 			status = mptsas_event_enable(mpt,
13123 			    (mptsas_event_enable_t *)data, mode, rval);
13124 			break;
13125 		case MPTIOCTL_EVENT_REPORT:
13126 			/*
13127 			 * The user has done an event report. Call our routine
13128 			 * which does this.
13129 			 */
13130 			status = mptsas_event_report(mpt,
13131 			    (mptsas_event_report_t *)data, mode, rval);
13132 			break;
13133 		case MPTIOCTL_REG_ACCESS:
13134 			/*
13135 			 * The user has requested register access.  Call our
13136 			 * routine which does this.
13137 			 */
13138 			status = mptsas_reg_access(mpt,
13139 			    (mptsas_reg_access_t *)data, mode);
13140 			break;
13141 		default:
13142 			status = scsi_hba_ioctl(dev, cmd, data, mode, credp,
13143 			    rval);
13144 			break;
13145 	}
13146 
13147 out:
13148 	return (status);
13149 }
13150 
13151 int
13152 mptsas_restart_ioc(mptsas_t *mpt)
13153 {
13154 	int		rval = DDI_SUCCESS;
13155 	mptsas_target_t	*ptgt = NULL;
13156 
13157 	ASSERT(mutex_owned(&mpt->m_mutex));
13158 
13159 	/*
13160 	 * Set a flag telling I/O path that we're processing a reset.  This is
13161 	 * needed because after the reset is complete, the hash table still
13162 	 * needs to be rebuilt.  If I/Os are started before the hash table is
13163 	 * rebuilt, I/O errors will occur.  This flag allows I/Os to be marked
13164 	 * so that they can be retried.
13165 	 */
13166 	mpt->m_in_reset = TRUE;
13167 
13168 	/*
13169 	 * Wait until all the allocated sense data buffers for DMA are freed.
13170 	 */
13171 	while (mpt->m_extreq_sense_refcount > 0)
13172 		cv_wait(&mpt->m_extreq_sense_refcount_cv, &mpt->m_mutex);
13173 
13174 	/*
13175 	 * Set all throttles to HOLD
13176 	 */
13177 	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
13178 	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
13179 		mptsas_set_throttle(mpt, ptgt, HOLD_THROTTLE);
13180 	}
13181 
13182 	/*
13183 	 * Disable interrupts
13184 	 */
13185 	MPTSAS_DISABLE_INTR(mpt);
13186 
13187 	/*
13188 	 * Abort all commands: outstanding commands, commands in waitq and
13189 	 * tx_waitq.
13190 	 */
13191 	mptsas_flush_hba(mpt);
13192 
13193 	/*
13194 	 * Reinitialize the chip.
13195 	 */
13196 	if (mptsas_init_chip(mpt, FALSE) == DDI_FAILURE) {
13197 		rval = DDI_FAILURE;
13198 	}
13199 
13200 	/*
13201 	 * Enable interrupts again
13202 	 */
13203 	MPTSAS_ENABLE_INTR(mpt);
13204 
13205 	/*
13206 	 * If mptsas_init_chip was successful, update the driver data.
13207 	 */
13208 	if (rval == DDI_SUCCESS) {
13209 		mptsas_update_driver_data(mpt);
13210 	}
13211 
13212 	/*
13213 	 * Reset the throttles
13214 	 */
13215 	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
13216 	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
13217 		mptsas_set_throttle(mpt, ptgt, MAX_THROTTLE);
13218 	}
13219 
13220 	mptsas_doneq_empty(mpt);
13221 	mptsas_restart_hba(mpt);
13222 
13223 	if (rval != DDI_SUCCESS) {
13224 		mptsas_fm_ereport(mpt, DDI_FM_DEVICE_NO_RESPONSE);
13225 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_LOST);
13226 	}
13227 
13228 	/*
13229 	 * Clear the reset flag so that I/Os can continue.
13230 	 */
13231 	mpt->m_in_reset = FALSE;
13232 
13233 	return (rval);
13234 }
13235 
13236 static int
13237 mptsas_init_chip(mptsas_t *mpt, int first_time)
13238 {
13239 	ddi_dma_cookie_t	cookie;
13240 	uint32_t		i;
13241 	int			rval;
13242 
13243 	/*
13244 	 * Setup configuration space
13245 	 */
13246 	if (mptsas_config_space_init(mpt) == FALSE) {
13247 		mptsas_log(mpt, CE_WARN, "mptsas_config_space_init "
13248 		    "failed!");
13249 		goto fail;
13250 	}
13251 
13252 	/*
13253 	 * Check to see if the firmware image is valid
13254 	 */
13255 	if (mptsas_hirrd(mpt, &mpt->m_reg->HostDiagnostic) &
13256 	    MPI2_DIAG_FLASH_BAD_SIG) {
13257 		mptsas_log(mpt, CE_WARN, "mptsas bad flash signature!");
13258 		goto fail;
13259 	}
13260 
13261 	/*
13262 	 * Reset the chip
13263 	 */
13264 	rval = mptsas_ioc_reset(mpt, first_time);
13265 	if (rval == MPTSAS_RESET_FAIL) {
13266 		mptsas_log(mpt, CE_WARN, "hard reset failed!");
13267 		goto fail;
13268 	}
13269 
13270 	if ((rval == MPTSAS_SUCCESS_MUR) && (!first_time)) {
13271 		goto mur;
13272 	}
13273 	/*
13274 	 * IOC facts can change after a diag reset so all buffers that are
13275 	 * based on these numbers must be de-allocated and re-allocated.  Get
13276 	 * new IOC facts each time chip is initialized.
13277 	 */
13278 	if (mptsas_ioc_get_facts(mpt) == DDI_FAILURE) {
13279 		mptsas_log(mpt, CE_WARN, "mptsas_ioc_get_facts failed");
13280 		goto fail;
13281 	}
13282 
13283 	if (mptsas_alloc_active_slots(mpt, KM_SLEEP)) {
13284 		goto fail;
13285 	}
13286 	/*
13287 	 * Allocate request message frames, reply free queue, reply descriptor
13288 	 * post queue, and reply message frames using latest IOC facts.
13289 	 */
13290 	if (mptsas_alloc_request_frames(mpt) == DDI_FAILURE) {
13291 		mptsas_log(mpt, CE_WARN, "mptsas_alloc_request_frames failed");
13292 		goto fail;
13293 	}
13294 	if (mptsas_alloc_sense_bufs(mpt) == DDI_FAILURE) {
13295 		mptsas_log(mpt, CE_WARN, "mptsas_alloc_sense_bufs failed");
13296 		goto fail;
13297 	}
13298 	if (mptsas_alloc_free_queue(mpt) == DDI_FAILURE) {
13299 		mptsas_log(mpt, CE_WARN, "mptsas_alloc_free_queue failed!");
13300 		goto fail;
13301 	}
13302 	if (mptsas_alloc_post_queue(mpt) == DDI_FAILURE) {
13303 		mptsas_log(mpt, CE_WARN, "mptsas_alloc_post_queue failed!");
13304 		goto fail;
13305 	}
13306 	if (mptsas_alloc_reply_frames(mpt) == DDI_FAILURE) {
13307 		mptsas_log(mpt, CE_WARN, "mptsas_alloc_reply_frames failed!");
13308 		goto fail;
13309 	}
13310 
13311 mur:
13312 	/*
13313 	 * Re-Initialize ioc to operational state
13314 	 */
13315 	if (mptsas_ioc_init(mpt) == DDI_FAILURE) {
13316 		mptsas_log(mpt, CE_WARN, "mptsas_ioc_init failed");
13317 		goto fail;
13318 	}
13319 
13320 	mptsas_alloc_reply_args(mpt);
13321 
13322 	/*
13323 	 * Initialize reply post index.  Reply free index is initialized after
13324 	 * the next loop.
13325 	 */
13326 	mpt->m_post_index = 0;
13327 
13328 	/*
13329 	 * Initialize the Reply Free Queue with the physical addresses of our
13330 	 * reply frames.
13331 	 */
13332 	cookie.dmac_address = mpt->m_reply_frame_dma_addr & 0xffffffffu;
13333 	for (i = 0; i < mpt->m_max_replies; i++) {
13334 		ddi_put32(mpt->m_acc_free_queue_hdl,
13335 		    &((uint32_t *)(void *)mpt->m_free_queue)[i],
13336 		    cookie.dmac_address);
13337 		cookie.dmac_address += mpt->m_reply_frame_size;
13338 	}
13339 	(void) ddi_dma_sync(mpt->m_dma_free_queue_hdl, 0, 0,
13340 	    DDI_DMA_SYNC_FORDEV);
13341 
13342 	/*
13343 	 * Initialize the reply free index to one past the last frame on the
13344 	 * queue.  This will signify that the queue is empty to start with.
13345 	 */
13346 	mpt->m_free_index = i;
13347 	ddi_put32(mpt->m_datap, &mpt->m_reg->ReplyFreeHostIndex, i);
13348 
13349 	/*
13350 	 * Initialize the reply post queue to 0xFFFFFFFF,0xFFFFFFFF's.
13351 	 */
13352 	for (i = 0; i < mpt->m_post_queue_depth; i++) {
13353 		ddi_put64(mpt->m_acc_post_queue_hdl,
13354 		    &((uint64_t *)(void *)mpt->m_post_queue)[i],
13355 		    0xFFFFFFFFFFFFFFFF);
13356 	}
13357 	(void) ddi_dma_sync(mpt->m_dma_post_queue_hdl, 0, 0,
13358 	    DDI_DMA_SYNC_FORDEV);
13359 
13360 	/*
13361 	 * Enable ports
13362 	 */
13363 	if (mptsas_ioc_enable_port(mpt) == DDI_FAILURE) {
13364 		mptsas_log(mpt, CE_WARN, "mptsas_ioc_enable_port failed");
13365 		goto fail;
13366 	}
13367 
13368 	/*
13369 	 * enable events
13370 	 */
13371 	if (mptsas_ioc_enable_event_notification(mpt)) {
13372 		mptsas_log(mpt, CE_WARN,
13373 		    "mptsas_ioc_enable_event_notification failed");
13374 		goto fail;
13375 	}
13376 
13377 	/*
13378 	 * We need these checks here as well as in attach, because
13379 	 * chip_init is called from multiple places.
13380 	 */
13381 
13382 	if ((mptsas_check_dma_handle(mpt->m_dma_req_frame_hdl) !=
13383 	    DDI_SUCCESS) ||
13384 	    (mptsas_check_dma_handle(mpt->m_dma_req_sense_hdl) !=
13385 	    DDI_SUCCESS) ||
13386 	    (mptsas_check_dma_handle(mpt->m_dma_reply_frame_hdl) !=
13387 	    DDI_SUCCESS) ||
13388 	    (mptsas_check_dma_handle(mpt->m_dma_free_queue_hdl) !=
13389 	    DDI_SUCCESS) ||
13390 	    (mptsas_check_dma_handle(mpt->m_dma_post_queue_hdl) !=
13391 	    DDI_SUCCESS) ||
13392 	    (mptsas_check_dma_handle(mpt->m_hshk_dma_hdl) !=
13393 	    DDI_SUCCESS)) {
13394 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
13395 		goto fail;
13396 	}
13397 
13398 	/* Check all acc handles */
13399 	if ((mptsas_check_acc_handle(mpt->m_datap) != DDI_SUCCESS) ||
13400 	    (mptsas_check_acc_handle(mpt->m_acc_req_frame_hdl) !=
13401 	    DDI_SUCCESS) ||
13402 	    (mptsas_check_acc_handle(mpt->m_acc_req_sense_hdl) !=
13403 	    DDI_SUCCESS) ||
13404 	    (mptsas_check_acc_handle(mpt->m_acc_reply_frame_hdl) !=
13405 	    DDI_SUCCESS) ||
13406 	    (mptsas_check_acc_handle(mpt->m_acc_free_queue_hdl) !=
13407 	    DDI_SUCCESS) ||
13408 	    (mptsas_check_acc_handle(mpt->m_acc_post_queue_hdl) !=
13409 	    DDI_SUCCESS) ||
13410 	    (mptsas_check_acc_handle(mpt->m_hshk_acc_hdl) !=
13411 	    DDI_SUCCESS) ||
13412 	    (mptsas_check_acc_handle(mpt->m_config_handle) !=
13413 	    DDI_SUCCESS)) {
13414 		ddi_fm_service_impact(mpt->m_dip, DDI_SERVICE_UNAFFECTED);
13415 		goto fail;
13416 	}
13417 
13418 	return (DDI_SUCCESS);
13419 
13420 fail:
13421 	return (DDI_FAILURE);
13422 }
13423 
13424 static int
13425 mptsas_get_pci_cap(mptsas_t *mpt)
13426 {
13427 	ushort_t caps_ptr, cap, cap_count;
13428 
13429 	if (mpt->m_config_handle == NULL)
13430 		return (FALSE);
13431 	/*
13432 	 * Check if capabilities list is supported and if so,
13433 	 * get initial capabilities pointer and clear bits 0,1.
13434 	 */
13435 	if (pci_config_get16(mpt->m_config_handle, PCI_CONF_STAT)
13436 	    & PCI_STAT_CAP) {
13437 		caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
13438 		    PCI_CONF_CAP_PTR), 4);
13439 	} else {
13440 		caps_ptr = PCI_CAP_NEXT_PTR_NULL;
13441 	}
13442 
13443 	/*
13444 	 * Walk capabilities if supported.
13445 	 */
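	/*
	 * Each capability entry starts with a two-byte header: the
	 * capability ID at caps_ptr and the next-capability pointer at
	 * caps_ptr + PCI_CAP_NEXT_PTR, followed by capability-specific
	 * registers (e.g. the PMCSR at caps_ptr + PCI_PMCSR for the PM
	 * capability, as used below).
	 */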
13446 	for (cap_count = 0; caps_ptr != PCI_CAP_NEXT_PTR_NULL; ) {
13447 
13448 		/*
13449 		 * Check that we haven't exceeded the maximum number of
13450 		 * capabilities and that the pointer is in a valid range.
13451 		 */
13452 		if (++cap_count > 48) {
13453 			mptsas_log(mpt, CE_WARN,
13454 			    "too many device capabilities.\n");
13455 			break;
13456 		}
13457 		if (caps_ptr < 64) {
13458 			mptsas_log(mpt, CE_WARN,
13459 			    "capabilities pointer 0x%x out of range.\n",
13460 			    caps_ptr);
13461 			break;
13462 		}
13463 
13464 		/*
13465 		 * Get next capability and check that it is valid.
13466 		 * For now, we only support power management.
13467 		 */
13468 		cap = pci_config_get8(mpt->m_config_handle, caps_ptr);
13469 		switch (cap) {
13470 			case PCI_CAP_ID_PM:
13471 				mptsas_log(mpt, CE_NOTE,
13472 				    "?mptsas%d supports power management.\n",
13473 				    mpt->m_instance);
13474 				mpt->m_options |= MPTSAS_OPT_PM;
13475 
13476 				/* Save PMCSR offset */
13477 				mpt->m_pmcsr_offset = caps_ptr + PCI_PMCSR;
13478 				break;
13479 			/*
13480 			 * The following capabilities are valid.  Any others
13481 			 * will cause a message to be logged.
13482 			 */
13483 			case PCI_CAP_ID_VPD:
13484 			case PCI_CAP_ID_MSI:
13485 			case PCI_CAP_ID_PCIX:
13486 			case PCI_CAP_ID_PCI_E:
13487 			case PCI_CAP_ID_MSI_X:
13488 				break;
13489 			default:
13490 				mptsas_log(mpt, CE_NOTE,
13491 				    "?mptsas%d unrecognized capability "
13492 				    "0x%x.\n", mpt->m_instance, cap);
13493 				break;
13494 		}
13495 
13496 		/*
13497 		 * Get next capabilities pointer and clear bits 0,1.
13498 		 */
13499 		caps_ptr = P2ALIGN(pci_config_get8(mpt->m_config_handle,
13500 		    (caps_ptr + PCI_CAP_NEXT_PTR)), 4);
13501 	}
13502 	return (TRUE);
13503 }
13504 
13505 static int
13506 mptsas_init_pm(mptsas_t *mpt)
13507 {
13508 	char		pmc_name[16];
13509 	char		*pmc[] = {
13510 				NULL,
13511 				"0=Off (PCI D3 State)",
13512 				"3=On (PCI D0 State)",
13513 				NULL
13514 			};
13515 	uint16_t	pmcsr_stat;
13516 
13517 	if (mptsas_get_pci_cap(mpt) == FALSE) {
13518 		return (DDI_FAILURE);
13519 	}
13520 	/*
13521 	 * If the PCI capabilities don't include PM, there is no need
13522 	 * to register the pm-components property.
13523 	 */
13524 	if (!(mpt->m_options & MPTSAS_OPT_PM))
13525 		return (DDI_SUCCESS);
13526 	/*
13527 	 * If power management is supported by this chip, create
13528 	 * pm-components property for the power management framework
13529 	 */
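	/*
	 * For instance, on instance 0 this results in:
	 *   pm-components = "NAME=mptsas0", "0=Off (PCI D3 State)",
	 *       "3=On (PCI D0 State)";
	 */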
13530 	(void) sprintf(pmc_name, "NAME=mptsas%d", mpt->m_instance);
13531 	pmc[0] = pmc_name;
13532 	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, mpt->m_dip,
13533 	    "pm-components", pmc, 3) != DDI_PROP_SUCCESS) {
13534 		mpt->m_options &= ~MPTSAS_OPT_PM;
13535 		mptsas_log(mpt, CE_WARN,
13536 		    "mptsas%d: pm-component property creation failed.",
13537 		    mpt->m_instance);
13538 		return (DDI_FAILURE);
13539 	}
13540 
13541 	/*
13542 	 * Power on device.
13543 	 */
13544 	(void) pm_busy_component(mpt->m_dip, 0);
13545 	pmcsr_stat = pci_config_get16(mpt->m_config_handle,
13546 	    mpt->m_pmcsr_offset);
13547 	if ((pmcsr_stat & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_D0) {
13548 		mptsas_log(mpt, CE_WARN, "mptsas%d: Power up the device",
13549 		    mpt->m_instance);
13550 		pci_config_put16(mpt->m_config_handle, mpt->m_pmcsr_offset,
13551 		    PCI_PMCSR_D0);
13552 	}
13553 	if (pm_power_has_changed(mpt->m_dip, 0, PM_LEVEL_D0) != DDI_SUCCESS) {
13554 		mptsas_log(mpt, CE_WARN, "pm_power_has_changed failed");
13555 		return (DDI_FAILURE);
13556 	}
13557 	mpt->m_power_level = PM_LEVEL_D0;
13558 	/*
13559 	 * Set pm idle delay.
13560 	 */
13561 	mpt->m_pm_idle_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
13562 	    mpt->m_dip, 0, "mptsas-pm-idle-delay", MPTSAS_PM_IDLE_TIMEOUT);
13563 
13564 	return (DDI_SUCCESS);
13565 }
13566 
13567 static int
13568 mptsas_register_intrs(mptsas_t *mpt)
13569 {
13570 	dev_info_t *dip;
13571 	int intr_types;
13572 
13573 	dip = mpt->m_dip;
13574 
13575 	/* Get supported interrupt types */
13576 	if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
13577 		mptsas_log(mpt, CE_WARN, "ddi_intr_get_supported_types "
13578 		    "failed\n");
13579 		return (FALSE);
13580 	}
13581 
13582 	NDBG6(("ddi_intr_get_supported_types() returned: 0x%x", intr_types));
13583 
13584 	/*
13585 	 * Try MSI, but fall back to FIXED
13586 	 */
13587 	if (mptsas_enable_msi && (intr_types & DDI_INTR_TYPE_MSI)) {
13588 		if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_MSI) == DDI_SUCCESS) {
13589 			NDBG0(("Using MSI interrupt type"));
13590 			mpt->m_intr_type = DDI_INTR_TYPE_MSI;
13591 			return (TRUE);
13592 		}
13593 	}
13594 	if (intr_types & DDI_INTR_TYPE_FIXED) {
13595 		if (mptsas_add_intrs(mpt, DDI_INTR_TYPE_FIXED) == DDI_SUCCESS) {
13596 			NDBG0(("Using FIXED interrupt type"));
13597 			mpt->m_intr_type = DDI_INTR_TYPE_FIXED;
13598 			return (TRUE);
13599 		} else {
13600 			NDBG0(("FIXED interrupt registration failed"));
13601 			return (FALSE);
13602 		}
13603 	}
13604 
13605 	return (FALSE);
13606 }
13607 
13608 static void
13609 mptsas_unregister_intrs(mptsas_t *mpt)
13610 {
13611 	mptsas_rem_intrs(mpt);
13612 }
13613 
13614 /*
13615  * mptsas_add_intrs:
13616  *
13617  * Register FIXED or MSI interrupts.
13618  */
13619 static int
13620 mptsas_add_intrs(mptsas_t *mpt, int intr_type)
13621 {
13622 	dev_info_t	*dip = mpt->m_dip;
13623 	int		avail, actual, count = 0;
13624 	int		i, flag, ret;
13625 
13626 	NDBG6(("mptsas_add_intrs:interrupt type 0x%x", intr_type));
13627 
13628 	/* Get number of interrupts */
13629 	ret = ddi_intr_get_nintrs(dip, intr_type, &count);
13630 	if ((ret != DDI_SUCCESS) || (count <= 0)) {
13631 		mptsas_log(mpt, CE_WARN, "ddi_intr_get_nintrs() failed, "
13632 		    "ret %d count %d\n", ret, count);
13633 
13634 		return (DDI_FAILURE);
13635 	}
13636 
13637 	/* Get number of available interrupts */
13638 	ret = ddi_intr_get_navail(dip, intr_type, &avail);
13639 	if ((ret != DDI_SUCCESS) || (avail == 0)) {
13640 		mptsas_log(mpt, CE_WARN, "ddi_intr_get_navail() failed, "
13641 		    "ret %d avail %d\n", ret, avail);
13642 
13643 		return (DDI_FAILURE);
13644 	}
13645 
13646 	if (avail < count) {
13647 		mptsas_log(mpt, CE_NOTE, "ddi_intr_get_nintrs() returned %d, "
13648 		    "ddi_intr_get_navail() returned %d", count, avail);
13649 	}
13650 
13651 	/* The MPT driver has only one interrupt routine */
13652 	if ((intr_type == DDI_INTR_TYPE_MSI) && (count > 1)) {
13653 		count = 1;
13654 	}
13655 
13656 	/* Allocate an array of interrupt handles */
13657 	mpt->m_intr_size = count * sizeof (ddi_intr_handle_t);
13658 	mpt->m_htable = kmem_alloc(mpt->m_intr_size, KM_SLEEP);
13659 
13660 	flag = DDI_INTR_ALLOC_NORMAL;
13661 
13662 	/* call ddi_intr_alloc() */
13663 	ret = ddi_intr_alloc(dip, mpt->m_htable, intr_type, 0,
13664 	    count, &actual, flag);
13665 
13666 	if ((ret != DDI_SUCCESS) || (actual == 0)) {
13667 		mptsas_log(mpt, CE_WARN, "ddi_intr_alloc() failed, ret %d\n",
13668 		    ret);
13669 		kmem_free(mpt->m_htable, mpt->m_intr_size);
13670 		return (DDI_FAILURE);
13671 	}
13672 
13673 	/* Use the interrupt count actually allocated, even if fewer than requested */
13674 	if (actual < count) {
13675 		mptsas_log(mpt, CE_NOTE, "Requested: %d, Received: %d\n",
13676 		    count, actual);
13677 	}
13678 
13679 	mpt->m_intr_cnt = actual;
13680 
13681 	/*
13682 	 * Get priority for first msi, assume remaining are all the same
13683 	 */
13684 	if ((ret = ddi_intr_get_pri(mpt->m_htable[0],
13685 	    &mpt->m_intr_pri)) != DDI_SUCCESS) {
13686 		mptsas_log(mpt, CE_WARN, "ddi_intr_get_pri() failed %d\n", ret);
13687 
13688 		/* Free already allocated intr */
13689 		for (i = 0; i < actual; i++) {
13690 			(void) ddi_intr_free(mpt->m_htable[i]);
13691 		}
13692 
13693 		kmem_free(mpt->m_htable, mpt->m_intr_size);
13694 		return (DDI_FAILURE);
13695 	}
13696 
13697 	/* Test for high level mutex */
13698 	if (mpt->m_intr_pri >= ddi_intr_get_hilevel_pri()) {
13699 		mptsas_log(mpt, CE_WARN, "mptsas_add_intrs: "
13700 		    "Hi level interrupt not supported\n");
13701 
13702 		/* Free already allocated intr */
13703 		for (i = 0; i < actual; i++) {
13704 			(void) ddi_intr_free(mpt->m_htable[i]);
13705 		}
13706 
13707 		kmem_free(mpt->m_htable, mpt->m_intr_size);
13708 		return (DDI_FAILURE);
13709 	}
13710 
13711 	/* Call ddi_intr_add_handler() */
13712 	for (i = 0; i < actual; i++) {
13713 		if ((ret = ddi_intr_add_handler(mpt->m_htable[i], mptsas_intr,
13714 		    (caddr_t)mpt, (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
13715 			mptsas_log(mpt, CE_WARN, "ddi_intr_add_handler() "
13716 			    "failed %d\n", ret);
13717 
13718 			/* Free already allocated intr */
13719 			for (i = 0; i < actual; i++) {
13720 				(void) ddi_intr_free(mpt->m_htable[i]);
13721 			}
13722 
13723 			kmem_free(mpt->m_htable, mpt->m_intr_size);
13724 			return (DDI_FAILURE);
13725 		}
13726 	}
13727 
13728 	if ((ret = ddi_intr_get_cap(mpt->m_htable[0], &mpt->m_intr_cap))
13729 	    != DDI_SUCCESS) {
13730 		mptsas_log(mpt, CE_WARN, "ddi_intr_get_cap() failed %d\n", ret);
13731 
13732 		/* Free already allocated intr */
13733 		for (i = 0; i < actual; i++) {
13734 			(void) ddi_intr_free(mpt->m_htable[i]);
13735 		}
13736 
13737 		kmem_free(mpt->m_htable, mpt->m_intr_size);
13738 		return (DDI_FAILURE);
13739 	}
13740 
13741 	/*
13742 	 * Enable interrupts
13743 	 */
13744 	if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
13745 		/* Call ddi_intr_block_enable() for MSI interrupts */
13746 		(void) ddi_intr_block_enable(mpt->m_htable, mpt->m_intr_cnt);
13747 	} else {
13748 		/* Call ddi_intr_enable for MSI or FIXED interrupts */
13749 		for (i = 0; i < mpt->m_intr_cnt; i++) {
13750 			(void) ddi_intr_enable(mpt->m_htable[i]);
13751 		}
13752 	}
13753 	return (DDI_SUCCESS);
13754 }
13755 
13756 /*
13757  * mptsas_rem_intrs:
13758  *
13759  * Unregister FIXED or MSI interrupts
13760  */
13761 static void
13762 mptsas_rem_intrs(mptsas_t *mpt)
13763 {
13764 	int	i;
13765 
13766 	NDBG6(("mptsas_rem_intrs"));
13767 
13768 	/* Disable all interrupts */
13769 	if (mpt->m_intr_cap & DDI_INTR_FLAG_BLOCK) {
13770 		/* Call ddi_intr_block_disable() */
13771 		(void) ddi_intr_block_disable(mpt->m_htable, mpt->m_intr_cnt);
13772 	} else {
13773 		for (i = 0; i < mpt->m_intr_cnt; i++) {
13774 			(void) ddi_intr_disable(mpt->m_htable[i]);
13775 		}
13776 	}
13777 
13778 	/* Call ddi_intr_remove_handler() */
13779 	for (i = 0; i < mpt->m_intr_cnt; i++) {
13780 		(void) ddi_intr_remove_handler(mpt->m_htable[i]);
13781 		(void) ddi_intr_free(mpt->m_htable[i]);
13782 	}
13783 
13784 	kmem_free(mpt->m_htable, mpt->m_intr_size);
13785 }
13786 
13787 /*
13788  * The IO fault service error handling callback function
13789  */
13790 /*ARGSUSED*/
13791 static int
13792 mptsas_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
13793 {
13794 	/*
13795 	 * as the driver can always deal with an error in any dma or
13796 	 * access handle, we can just return the fme_status value.
13797 	 */
13798 	pci_ereport_post(dip, err, NULL);
13799 	return (err->fme_status);
13800 }
13801 
13802 /*
13803  * mptsas_fm_init - initialize fma capabilities and register with IO
13804  *               fault services.
13805  */
13806 static void
13807 mptsas_fm_init(mptsas_t *mpt)
13808 {
13809 	/*
13810 	 * Need to change iblock to priority for new MSI intr
13811 	 */
13812 	ddi_iblock_cookie_t	fm_ibc;
13813 
13814 	/* Only register with IO Fault Services if we have some capability */
13815 	if (mpt->m_fm_capabilities) {
13816 		/* Adjust access and dma attributes for FMA */
13817 		mpt->m_reg_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
13818 		mpt->m_msg_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
13819 		mpt->m_io_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
13820 
13821 		/*
13822 		 * Register capabilities with IO Fault Services.
13823 		 * mpt->m_fm_capabilities will be updated to indicate
13824 		 * capabilities actually supported (not requested.)
13825 		 */
13826 		ddi_fm_init(mpt->m_dip, &mpt->m_fm_capabilities, &fm_ibc);
13827 
13828 		/*
13829 		 * Initialize pci ereport capabilities if ereport
13830 		 * capable (should always be.)
13831 		 */
13832 		if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
13833 		    DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13834 			pci_ereport_setup(mpt->m_dip);
13835 		}
13836 
13837 		/*
13838 		 * Register error callback if error callback capable.
13839 		 */
13840 		if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13841 			ddi_fm_handler_register(mpt->m_dip,
13842 			    mptsas_fm_error_cb, (void *) mpt);
13843 		}
13844 	}
13845 }
13846 
13847 /*
13848  * mptsas_fm_fini - Releases fma capabilities and un-registers with IO
13849  *               fault services.
13850  *
13851  */
13852 static void
13853 mptsas_fm_fini(mptsas_t *mpt)
13854 {
13855 	/* Only unregister FMA capabilities if registered */
13856 	if (mpt->m_fm_capabilities) {
13857 
13858 		/*
13859 		 * Un-register error callback if error callback capable.
13860 		 */
13861 
13862 		if (DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13863 			ddi_fm_handler_unregister(mpt->m_dip);
13864 		}
13865 
13866 		/*
13867 		 * Release any resources allocated by pci_ereport_setup()
13868 		 */
13869 
13870 		if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities) ||
13871 		    DDI_FM_ERRCB_CAP(mpt->m_fm_capabilities)) {
13872 			pci_ereport_teardown(mpt->m_dip);
13873 		}
13874 
13875 		/* Unregister from IO Fault Services */
13876 		ddi_fm_fini(mpt->m_dip);
13877 
13878 		/* Adjust access and dma attributes for FMA */
13879 		mpt->m_reg_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
13880 		mpt->m_msg_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
13881 		mpt->m_io_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
13882 
13883 	}
13884 }
13885 
13886 int
13887 mptsas_check_acc_handle(ddi_acc_handle_t handle)
13888 {
13889 	ddi_fm_error_t	de;
13890 
13891 	if (handle == NULL)
13892 		return (DDI_FAILURE);
13893 	ddi_fm_acc_err_get(handle, &de, DDI_FME_VER0);
13894 	return (de.fme_status);
13895 }
13896 
13897 int
13898 mptsas_check_dma_handle(ddi_dma_handle_t handle)
13899 {
13900 	ddi_fm_error_t	de;
13901 
13902 	if (handle == NULL)
13903 		return (DDI_FAILURE);
13904 	ddi_fm_dma_err_get(handle, &de, DDI_FME_VER0);
13905 	return (de.fme_status);
13906 }
13907 
13908 void
13909 mptsas_fm_ereport(mptsas_t *mpt, char *detail)
13910 {
13911 	uint64_t	ena;
13912 	char		buf[FM_MAX_CLASS];
13913 
13914 	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
13915 	ena = fm_ena_generate(0, FM_ENA_FMT1);
13916 	if (DDI_FM_EREPORT_CAP(mpt->m_fm_capabilities)) {
13917 		ddi_fm_ereport_post(mpt->m_dip, buf, ena, DDI_NOSLEEP,
13918 		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
13919 	}
13920 }
13921 
13922 static int
13923 mptsas_get_target_device_info(mptsas_t *mpt, uint32_t page_address,
13924     uint16_t *dev_handle, mptsas_target_t **pptgt)
13925 {
13926 	int		rval;
13927 	uint32_t	dev_info;
13928 	uint64_t	sas_wwn;
13929 	mptsas_phymask_t phymask;
13930 	uint8_t		physport, phynum, config, disk;
13931 	uint64_t	devicename;
13932 	uint16_t	pdev_hdl;
13933 	mptsas_target_t	*tmp_tgt = NULL;
13934 	uint16_t	bay_num, enclosure, io_flags;
13935 
13936 	ASSERT(*pptgt == NULL);
13937 
13938 	rval = mptsas_get_sas_device_page0(mpt, page_address, dev_handle,
13939 	    &sas_wwn, &dev_info, &physport, &phynum, &pdev_hdl,
13940 	    &bay_num, &enclosure, &io_flags);
13941 	if (rval != DDI_SUCCESS) {
13942 		rval = DEV_INFO_FAIL_PAGE0;
13943 		return (rval);
13944 	}
13945 
13946 	if ((dev_info & (MPI2_SAS_DEVICE_INFO_SSP_TARGET |
13947 	    MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13948 	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) == 0) {
13949 		rval = DEV_INFO_WRONG_DEVICE_TYPE;
13950 		return (rval);
13951 	}
13952 
13953 	/*
13954 	 * Check if the dev handle is for a Phys Disk. If so, set return value
13955 	 * and exit.  Don't add Phys Disks to hash.
13956 	 */
13957 	for (config = 0; config < mpt->m_num_raid_configs; config++) {
13958 		for (disk = 0; disk < MPTSAS_MAX_DISKS_IN_CONFIG; disk++) {
13959 			if (*dev_handle == mpt->m_raidconfig[config].
13960 			    m_physdisk_devhdl[disk]) {
13961 				rval = DEV_INFO_PHYS_DISK;
13962 				return (rval);
13963 			}
13964 		}
13965 	}
13966 
13967 	/*
13968 	 * Get the SATA Device Name from SAS device page0 for a SATA
13969 	 * device. If the device name doesn't exist, set mta_wwn to 0
13970 	 * for direct-attached SATA. For a device behind an expander we
13971 	 * can still use the STP address assigned by the expander.
13972 	 */
13973 	if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
13974 	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
13975 		/* alloc a temporary target to send the cmd to */
13976 		tmp_tgt = mptsas_tgt_alloc(mpt->m_tmp_targets, *dev_handle,
13977 		    0, dev_info, 0, 0);
13978 		mutex_exit(&mpt->m_mutex);
13979 
13980 		devicename = mptsas_get_sata_guid(mpt, tmp_tgt, 0);
13981 
13982 		if (devicename == -1) {
13983 			mutex_enter(&mpt->m_mutex);
13984 			refhash_remove(mpt->m_tmp_targets, tmp_tgt);
13985 			rval = DEV_INFO_FAIL_GUID;
13986 			return (rval);
13987 		}
13988 
13989 		if (devicename != 0 && (((devicename >> 56) & 0xf0) == 0x50)) {
13990 			sas_wwn = devicename;
13991 		} else if (dev_info & MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH) {
13992 			sas_wwn = 0;
13993 		}
13994 
13995 		mutex_enter(&mpt->m_mutex);
13996 		refhash_remove(mpt->m_tmp_targets, tmp_tgt);
13997 	}
13998 
13999 	phymask = mptsas_physport_to_phymask(mpt, physport);
14000 	*pptgt = mptsas_tgt_alloc(mpt->m_targets, *dev_handle, sas_wwn,
14001 	    dev_info, phymask, phynum);
14002 	if (*pptgt == NULL) {
14003 		mptsas_log(mpt, CE_WARN, "Failed to allocate target "
14004 		    "structure!");
14005 		rval = DEV_INFO_FAIL_ALLOC;
14006 		return (rval);
14007 	}
14008 	(*pptgt)->m_io_flags = io_flags;
14009 	(*pptgt)->m_enclosure = enclosure;
14010 	(*pptgt)->m_slot_num = bay_num;
14011 	return (DEV_INFO_SUCCESS);
14012 }
14013 
14014 uint64_t
14015 mptsas_get_sata_guid(mptsas_t *mpt, mptsas_target_t *ptgt, int lun)
14016 {
14017 	uint64_t	sata_guid = 0, *pwwn = NULL;
14018 	int		target = ptgt->m_devhdl;
14019 	uchar_t		*inq83 = NULL;
14020 	int		inq83_len = 0xFF;
14021 	uchar_t		*dblk = NULL;
14022 	int		inq83_retry = 3;
14023 	int		rval = DDI_FAILURE;
14024 
14025 	inq83	= kmem_zalloc(inq83_len, KM_SLEEP);
14026 
14027 inq83_retry:
14028 	rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
14029 	    inq83_len, NULL, 1);
14030 	if (rval != DDI_SUCCESS) {
14031 		mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
14032 		    "0x83 for target:%x, lun:%x failed!", target, lun);
14033 		sata_guid = -1;
14034 		goto out;
14035 	}
14036 	/* According to SAT-2, the first descriptor is the logical unit name */
14037 	dblk = &inq83[4];
14038 	if ((dblk[1] & 0x30) != 0) {
14039 		mptsas_log(mpt, CE_WARN, "!Descriptor is not lun associated.");
14040 		goto out;
14041 	}
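	/*
	 * A NAA-format identifier has 0x5 in the high nibble of its first
	 * byte, e.g. a WWN such as 0x5000c50012345678 (illustrative value
	 * only); such an identifier passes the check below and is decoded
	 * from its big-endian on-wire form.
	 */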
14042 	pwwn = (uint64_t *)(void *)(&dblk[4]);
14043 	if ((dblk[4] & 0xf0) == 0x50) {
14044 		sata_guid = BE_64(*pwwn);
14045 		goto out;
14046 	} else if (dblk[4] == 'A') {
14047 		NDBG20(("SATA drive has no NAA format GUID."));
14048 		goto out;
14049 	} else {
14050 		/* The data is not ready, wait and retry */
14051 		inq83_retry--;
14052 		if (inq83_retry <= 0) {
14053 			goto out;
14054 		}
14055 		NDBG20(("The GUID is not ready, retry..."));
14056 		delay(1 * drv_usectohz(1000000));
14057 		goto inq83_retry;
14058 	}
14059 out:
14060 	kmem_free(inq83, inq83_len);
14061 	return (sata_guid);
14062 }
14063 
14064 static int
14065 mptsas_inquiry(mptsas_t *mpt, mptsas_target_t *ptgt, int lun, uchar_t page,
14066     unsigned char *buf, int len, int *reallen, uchar_t evpd)
14067 {
14068 	uchar_t			cdb[CDB_GROUP0];
14069 	struct scsi_address	ap;
14070 	struct buf		*data_bp = NULL;
14071 	int			resid = 0;
14072 	int			ret = DDI_FAILURE;
14073 
14074 	ASSERT(len <= 0xffff);
14075 
14076 	ap.a_target = MPTSAS_INVALID_DEVHDL;
14077 	ap.a_lun = (uchar_t)(lun);
14078 	ap.a_hba_tran = mpt->m_tran;
14079 
14080 	data_bp = scsi_alloc_consistent_buf(&ap,
14081 	    (struct buf *)NULL, len, B_READ, NULL_FUNC, NULL);
14082 	if (data_bp == NULL) {
14083 		return (ret);
14084 	}
14085 	bzero(cdb, CDB_GROUP0);
14086 	cdb[0] = SCMD_INQUIRY;
14087 	cdb[1] = evpd;
14088 	cdb[2] = page;
14089 	cdb[3] = (len & 0xff00) >> 8;
14090 	cdb[4] = (len & 0x00ff);
14091 	cdb[5] = 0;
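	/*
	 * The 6-byte INQUIRY CDB built above: byte 1 carries the EVPD flag,
	 * byte 2 the VPD page code, and bytes 3-4 the allocation length in
	 * big-endian order. For example, page = 0x83 with evpd = 1 requests
	 * the Device Identification VPD page.
	 */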
14092 
14093 	ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP0, data_bp,
14094 	    &resid);
14095 	if (ret == DDI_SUCCESS) {
14096 		if (reallen) {
14097 			*reallen = len - resid;
14098 		}
14099 		bcopy((caddr_t)data_bp->b_un.b_addr, buf, len);
14100 	}
14101 	if (data_bp) {
14102 		scsi_free_consistent_buf(data_bp);
14103 	}
14104 	return (ret);
14105 }
14106 
14107 static int
14108 mptsas_send_scsi_cmd(mptsas_t *mpt, struct scsi_address *ap,
14109     mptsas_target_t *ptgt, uchar_t *cdb, int cdblen, struct buf *data_bp,
14110     int *resid)
14111 {
14112 	struct scsi_pkt		*pktp = NULL;
14113 	scsi_hba_tran_t		*tran_clone = NULL;
14114 	mptsas_tgt_private_t	*tgt_private = NULL;
14115 	int			ret = DDI_FAILURE;
14116 
14117 	/*
14118 	 * scsi_hba_tran_t->tran_tgt_private is used to pass the address
14119 	 * information to scsi_init_pkt; allocate a scsi_hba_tran structure
14120 	 * to simulate the commands coming from sd.
14121 	 */
14122 	tran_clone = kmem_alloc(
14123 	    sizeof (scsi_hba_tran_t), KM_SLEEP);
14124 	if (tran_clone == NULL) {
14125 		goto out;
14126 	}
14127 	bcopy((caddr_t)mpt->m_tran,
14128 	    (caddr_t)tran_clone, sizeof (scsi_hba_tran_t));
14129 	tgt_private = kmem_alloc(
14130 	    sizeof (mptsas_tgt_private_t), KM_SLEEP);
14131 	if (tgt_private == NULL) {
14132 		goto out;
14133 	}
14134 	tgt_private->t_lun = ap->a_lun;
14135 	tgt_private->t_private = ptgt;
14136 	tran_clone->tran_tgt_private = tgt_private;
14137 	ap->a_hba_tran = tran_clone;
14138 
14139 	pktp = scsi_init_pkt(ap, (struct scsi_pkt *)NULL,
14140 	    data_bp, cdblen, sizeof (struct scsi_arq_status),
14141 	    0, PKT_CONSISTENT, NULL, NULL);
14142 	if (pktp == NULL) {
14143 		goto out;
14144 	}
14145 	bcopy(cdb, pktp->pkt_cdbp, cdblen);
14146 	pktp->pkt_flags = FLAG_NOPARITY;
14147 	if (scsi_poll(pktp) < 0) {
14148 		goto out;
14149 	}
14150 	if (((struct scsi_status *)pktp->pkt_scbp)->sts_chk) {
14151 		goto out;
14152 	}
14153 	if (resid != NULL) {
14154 		*resid = pktp->pkt_resid;
14155 	}
14156 
14157 	ret = DDI_SUCCESS;
14158 out:
14159 	if (pktp) {
14160 		scsi_destroy_pkt(pktp);
14161 	}
14162 	if (tran_clone) {
14163 		kmem_free(tran_clone, sizeof (scsi_hba_tran_t));
14164 	}
14165 	if (tgt_private) {
14166 		kmem_free(tgt_private, sizeof (mptsas_tgt_private_t));
14167 	}
14168 	return (ret);
14169 }
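
/*
 * mptsas_parse_address() parses a target unit-address string of the form
 * "w<WWN>,<LUN>" or "p<PHY>,<LUN>", with the LUN in hexadecimal; e.g.
 * "w5000c50012345678,0" or "p3,0" (example values only).
 */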
14170 static int
14171 mptsas_parse_address(char *name, uint64_t *wwid, uint8_t *phy, int *lun)
14172 {
14173 	char	*cp = NULL;
14174 	char	*ptr = NULL;
14175 	size_t	s = 0;
14176 	char	*wwid_str = NULL;
14177 	char	*lun_str = NULL;
14178 	long	lunnum;
14179 	long	phyid = -1;
14180 	int	rc = DDI_FAILURE;
14181 
14182 	ptr = name;
14183 	ASSERT(ptr[0] == 'w' || ptr[0] == 'p');
14184 	ptr++;
14185 	if ((cp = strchr(ptr, ',')) == NULL) {
14186 		return (DDI_FAILURE);
14187 	}
14188 
14189 	wwid_str = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14190 	s = (uintptr_t)cp - (uintptr_t)ptr;
14191 
14192 	bcopy(ptr, wwid_str, s);
14193 	wwid_str[s] = '\0';
14194 
14195 	ptr = ++cp;
14196 
14197 	if ((cp = strchr(ptr, '\0')) == NULL) {
14198 		goto out;
14199 	}
14200 	lun_str =  kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14201 	s = (uintptr_t)cp - (uintptr_t)ptr;
14202 
14203 	bcopy(ptr, lun_str, s);
14204 	lun_str[s] = '\0';
14205 
14206 	if (name[0] == 'p') {
14207 		rc = ddi_strtol(wwid_str, NULL, 0x10, &phyid);
14208 	} else {
14209 		rc = scsi_wwnstr_to_wwn(wwid_str, wwid);
14210 	}
14211 	if (rc != DDI_SUCCESS)
14212 		goto out;
14213 
14214 	if (phyid != -1) {
14215 		ASSERT(phyid < MPTSAS_MAX_PHYS);
14216 		*phy = (uint8_t)phyid;
14217 	}
14218 	rc = ddi_strtol(lun_str, NULL, 0x10, &lunnum);
14219 	if (rc != 0)
14220 		goto out;
14221 
14222 	*lun = (int)lunnum;
14223 	rc = DDI_SUCCESS;
14224 out:
14225 	if (wwid_str)
14226 		kmem_free(wwid_str, SCSI_MAXNAMELEN);
14227 	if (lun_str)
14228 		kmem_free(lun_str, SCSI_MAXNAMELEN);
14229 
14230 	return (rc);
14231 }
14232 
14233 /*
14234  * mptsas_parse_smp_name() parses a SAS WWN string
14235  * of the form "wWWN".
14236  */
14237 static int
14238 mptsas_parse_smp_name(char *name, uint64_t *wwn)
14239 {
14240 	char	*ptr = name;
14241 
14242 	if (*ptr != 'w') {
14243 		return (DDI_FAILURE);
14244 	}
14245 
14246 	ptr++;
14247 	if (scsi_wwnstr_to_wwn(ptr, wwn)) {
14248 		return (DDI_FAILURE);
14249 	}
14250 	return (DDI_SUCCESS);
14251 }
14252 
14253 static int
14254 mptsas_bus_config(dev_info_t *pdip, uint_t flag,
14255     ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
14256 {
14257 	int		ret = NDI_FAILURE;
14258 	mptsas_t	*mpt;
14259 	char		*ptr = NULL;
14260 	char		*devnm = NULL;
14261 	uint64_t	wwid = 0;
14262 	uint8_t		phy = 0xFF;
14263 	int		lun = 0;
14264 	uint_t		mflags = flag;
14265 	int		bconfig = TRUE;
14266 
14267 	if (scsi_hba_iport_unit_address(pdip) == 0) {
14268 		return (DDI_FAILURE);
14269 	}
14270 
14271 	mpt = DIP2MPT(pdip);
14272 	if (!mpt) {
14273 		return (DDI_FAILURE);
14274 	}
14275 	/*
14276 	 * Hold the nexus across the bus_config
14277 	 */
14278 	ndi_devi_enter(scsi_vhci_dip);
14279 	ndi_devi_enter(pdip);
14280 	switch (op) {
14281 	case BUS_CONFIG_ONE:
14282 		/* parse wwid/target name out of name given */
14283 		if ((ptr = strchr((char *)arg, '@')) == NULL) {
14284 			ret = NDI_FAILURE;
14285 			break;
14286 		}
14287 		ptr++;
14288 		if (strncmp((char *)arg, "smp", 3) == 0) {
14289 			/*
14290 			 * This is a SMP target device
14291 			 */
14292 			ret = mptsas_parse_smp_name(ptr, &wwid);
14293 			if (ret != DDI_SUCCESS) {
14294 				ret = NDI_FAILURE;
14295 				break;
14296 			}
14297 			ret = mptsas_config_smp(pdip, wwid, childp);
14298 		} else if ((ptr[0] == 'w') || (ptr[0] == 'p')) {
14299 			/*
14300 			 * OBP could pass down a non-canonical bootpath
14301 			 * with the LUN portion omitted when the LUN is 0,
14302 			 * so the driver needs to adjust the string.
14303 			 */
14304 			if (strchr(ptr, ',') == NULL) {
14305 				devnm = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
14306 				(void) sprintf(devnm, "%s,0", (char *)arg);
14307 				ptr = strchr(devnm, '@');
14308 				ptr++;
14309 			}
14310 
14311 			/*
14312 			 * The device path is in wWWID format and the device
14313 			 * is not an SMP target device.
14314 			 */
14315 			ret = mptsas_parse_address(ptr, &wwid, &phy, &lun);
14316 			if (ret != DDI_SUCCESS) {
14317 				ret = NDI_FAILURE;
14318 				break;
14319 			}
14320 			*childp = NULL;
14321 			if (ptr[0] == 'w') {
14322 				ret = mptsas_config_one_addr(pdip, wwid,
14323 				    lun, childp);
14324 			} else if (ptr[0] == 'p') {
14325 				ret = mptsas_config_one_phy(pdip, phy, lun,
14326 				    childp);
14327 			}
14328 
14329 			/*
14330 			 * If this is a CD/DVD device in the OBP path, the
14331 			 * ndi_busop_bus_config() call can be skipped since the
14332 			 * config-one operation was already done above.
14333 			 */
14334 			if ((ret == NDI_SUCCESS) && (*childp != NULL) &&
14335 			    (strcmp(ddi_node_name(*childp), "cdrom") == 0) &&
14336 			    (strncmp((char *)arg, "disk", 4) == 0)) {
14337 				bconfig = FALSE;
14338 				ndi_hold_devi(*childp);
14339 			}
14340 		} else {
14341 			ret = NDI_FAILURE;
14342 			break;
14343 		}
14344 
14345 		/*
14346 		 * DDI group instructed us to use this flag.
14347 		 */
14348 		mflags |= NDI_MDI_FALLBACK;
14349 		break;
14350 	case BUS_CONFIG_DRIVER:
14351 	case BUS_CONFIG_ALL:
14352 		mptsas_config_all(pdip);
14353 		ret = NDI_SUCCESS;
14354 		break;
14355 	default:
14356 		ret = NDI_FAILURE;
14357 		break;
14358 	}
14359 
14360 	if ((ret == NDI_SUCCESS) && bconfig) {
14361 		ret = ndi_busop_bus_config(pdip, mflags, op,
14362 		    (devnm == NULL) ? arg : devnm, childp, 0);
14363 	}
14364 
14365 	ndi_devi_exit(pdip);
14366 	ndi_devi_exit(scsi_vhci_dip);
14367 	if (devnm != NULL)
14368 		kmem_free(devnm, SCSI_MAXNAMELEN);
14369 	return (ret);
14370 }
14371 
14372 static int
14373 mptsas_probe_lun(dev_info_t *pdip, int lun, dev_info_t **dip,
14374     mptsas_target_t *ptgt)
14375 {
14376 	int			rval = DDI_FAILURE;
14377 	struct scsi_inquiry	*sd_inq = NULL;
14378 	mptsas_t		*mpt = DIP2MPT(pdip);
14379 
14380 	sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
14381 
14382 	rval = mptsas_inquiry(mpt, ptgt, lun, 0, (uchar_t *)sd_inq,
14383 	    SUN_INQSIZE, 0, (uchar_t)0);
14384 
14385 	if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
14386 		rval = mptsas_create_lun(pdip, sd_inq, dip, ptgt, lun);
14387 	} else {
14388 		rval = DDI_FAILURE;
14389 	}
14390 
14391 	kmem_free(sd_inq, SUN_INQSIZE);
14392 	return (rval);
14393 }
14394 
14395 static int
14396 mptsas_config_one_addr(dev_info_t *pdip, uint64_t sasaddr, int lun,
14397     dev_info_t **lundip)
14398 {
14399 	int		rval;
14400 	mptsas_t		*mpt = DIP2MPT(pdip);
14401 	int		phymask;
14402 	mptsas_target_t	*ptgt = NULL;
14403 
14404 	/*
14405 	 * Get the physical port associated to the iport
14406 	 */
14407 	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
14408 	    "phymask", 0);
14409 
14410 	ptgt = mptsas_wwid_to_ptgt(mpt, phymask, sasaddr);
14411 	if (ptgt == NULL) {
14412 		/*
14413 		 * The search did not match any device.
14414 		 */
14415 		return (DDI_FAILURE);
14416 	}
14417 	/*
14418 	 * If the LUN already exists and the status is online,
14419 	 * we just return the pointer to dev_info_t directly.
14420 	 * For the mdi_pathinfo node, we'll handle it in
14421 	 * mptsas_create_virt_lun()
14422 	 * TODO should be also in mptsas_handle_dr
14423 	 */
14424 
14425 	*lundip = mptsas_find_child_addr(pdip, sasaddr, lun);
14426 	if (*lundip != NULL) {
14427 		/*
14428 		 * TODO Another scenario is: we hotplug the same disk
14429 		 * into the same slot and the devhdl changes; is this
14430 		 * possible?
14431 		 * tgt_private->t_private != ptgt
14432 		 */
14433 		if (sasaddr != ptgt->m_addr.mta_wwn) {
14434 			/*
14435 			 * The device has changed although the devhdl is the
14436 			 * same (Enclosure mapping mode, change drive on the
14437 			 * same slot)
14438 			 */
14439 			return (DDI_FAILURE);
14440 		}
14441 		return (DDI_SUCCESS);
14442 	}
14443 
14444 	if (phymask == 0) {
14445 		/*
14446 		 * Configure IR volume
14447 		 */
14448 		rval =  mptsas_config_raid(pdip, ptgt->m_devhdl, lundip);
14449 		return (rval);
14450 	}
14451 	rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
14452 
14453 	return (rval);
14454 }
14455 
14456 static int
14457 mptsas_config_one_phy(dev_info_t *pdip, uint8_t phy, int lun,
14458     dev_info_t **lundip)
14459 {
14460 	int		rval;
14461 	mptsas_t	*mpt = DIP2MPT(pdip);
14462 	mptsas_phymask_t phymask;
14463 	mptsas_target_t	*ptgt = NULL;
14464 
14465 	/*
14466 	 * Get the physical port associated to the iport
14467 	 */
14468 	phymask = (mptsas_phymask_t)ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
14469 	    "phymask", 0);
14470 
14471 	ptgt = mptsas_phy_to_tgt(mpt, phymask, phy);
14472 	if (ptgt == NULL) {
14473 		/*
14474 		 * The search did not match any device.
14475 		 */
14476 		return (DDI_FAILURE);
14477 	}
14478 
14479 	/*
14480 	 * If the LUN already exists and the status is online,
14481 	 * we just return the pointer to dev_info_t directly.
14482 	 * For the mdi_pathinfo node, we'll handle it in
14483 	 * mptsas_create_virt_lun().
14484 	 */
14485 
14486 	*lundip = mptsas_find_child_phy(pdip, phy);
14487 	if (*lundip != NULL) {
14488 		return (DDI_SUCCESS);
14489 	}
14490 
14491 	rval = mptsas_probe_lun(pdip, lun, lundip, ptgt);
14492 
14493 	return (rval);
14494 }
14495 
14496 static int
14497 mptsas_retrieve_lundata(int lun_cnt, uint8_t *buf, uint16_t *lun_num,
14498     uint8_t *lun_addr_type)
14499 {
14500 	uint32_t	lun_idx = 0;
14501 
14502 	ASSERT(lun_num != NULL);
14503 	ASSERT(lun_addr_type != NULL);
14504 
14505 	lun_idx = (lun_cnt + 1) * MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
14506 	/* determine report luns addressing type */
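	/*
	 * Each REPORT LUNS entry is 8 bytes. For the addressing methods
	 * handled below the LUN number is ((byte0 & 0x3F) << 8) | byte1;
	 * for example a hypothetical entry with byte0 = 0x40 and
	 * byte1 = 0x05 decodes to LUN 5 using flat space addressing (01b).
	 */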
14507 	switch (buf[lun_idx] & MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) {
14508 		/*
14509 		 * Vendors in the field have been found to be concatenating
14510 		 * bus/target/lun to equal the complete lun value instead
14511 		 * of switching to flat space addressing
14512 		 */
14513 		/* 00b - peripheral device addressing method */
14514 	case MPTSAS_SCSI_REPORTLUNS_ADDRESS_PERIPHERAL:
14515 		/* FALLTHRU */
14516 		/* 10b - logical unit addressing method */
14517 	case MPTSAS_SCSI_REPORTLUNS_ADDRESS_LOGICAL_UNIT:
14518 		/* FALLTHRU */
14519 		/* 01b - flat space addressing method */
14520 	case MPTSAS_SCSI_REPORTLUNS_ADDRESS_FLAT_SPACE:
14521 		/* byte0 bit0-5=msb lun byte1 bit0-7=lsb lun */
14522 		*lun_addr_type = (buf[lun_idx] &
14523 		    MPTSAS_SCSI_REPORTLUNS_ADDRESS_MASK) >> 6;
14524 		*lun_num = (buf[lun_idx] & 0x3F) << 8;
14525 		*lun_num |= buf[lun_idx + 1];
14526 		return (DDI_SUCCESS);
14527 	default:
14528 		return (DDI_FAILURE);
14529 	}
14530 }
14531 
14532 static int
14533 mptsas_config_luns(dev_info_t *pdip, mptsas_target_t *ptgt)
14534 {
14535 	struct buf		*repluns_bp = NULL;
14536 	struct scsi_address	ap;
14537 	uchar_t			cdb[CDB_GROUP5];
14538 	int			ret = DDI_FAILURE;
14539 	int			retry = 0;
14540 	int			lun_list_len = 0;
14541 	uint16_t		lun_num = 0;
14542 	uint8_t			lun_addr_type = 0;
14543 	uint32_t		lun_cnt = 0;
14544 	uint32_t		lun_total = 0;
14545 	dev_info_t		*cdip = NULL;
14546 	uint16_t		*saved_repluns = NULL;
14547 	char			*buffer = NULL;
14548 	int			buf_len = 128;
14549 	mptsas_t		*mpt = DIP2MPT(pdip);
14550 	uint64_t		sas_wwn = 0;
14551 	uint8_t			phy = 0xFF;
14552 	uint32_t		dev_info = 0;
14553 
14554 	mutex_enter(&mpt->m_mutex);
14555 	sas_wwn = ptgt->m_addr.mta_wwn;
14556 	phy = ptgt->m_phynum;
14557 	dev_info = ptgt->m_deviceinfo;
14558 	mutex_exit(&mpt->m_mutex);
14559 
14560 	if (sas_wwn == 0) {
14561 		/*
14562 		 * It's a SATA device without a Device Name,
14563 		 * so don't try multiple LUNs.
14564 		 */
14565 		if (mptsas_find_child_phy(pdip, phy)) {
14566 			return (DDI_SUCCESS);
14567 		} else {
14568 			/*
14569 			 * The node still needs to be configured and created.
14570 			 */
14571 			return (DDI_FAILURE);
14572 		}
14573 	}
14574 
14575 	/*
14576 	 * A WWN (SAS address or Device Name) exists.
14577 	 */
14578 	if (dev_info & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
14579 	    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
14580 		/*
14581 		 * It's a SATA device with a Device Name,
14582 		 * so don't try multiple LUNs.
14583 		 */
14584 		if (mptsas_find_child_addr(pdip, sas_wwn, 0)) {
14585 			return (DDI_SUCCESS);
14586 		} else {
14587 			return (DDI_FAILURE);
14588 		}
14589 	}
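	/*
	 * Issue SCMD_REPORT_LUNS with an initial 128-byte buffer. If the
	 * LUN list length in the first four bytes of the returned data
	 * shows the buffer was too small, reallocate it as lun_list_len
	 * plus the 8-byte header and retry, up to three attempts.
	 */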
14590 
14591 	do {
14592 		ap.a_target = MPTSAS_INVALID_DEVHDL;
14593 		ap.a_lun = 0;
14594 		ap.a_hba_tran = mpt->m_tran;
14595 		repluns_bp = scsi_alloc_consistent_buf(&ap,
14596 		    (struct buf *)NULL, buf_len, B_READ, NULL_FUNC, NULL);
14597 		if (repluns_bp == NULL) {
14598 			retry++;
14599 			continue;
14600 		}
14601 		bzero(cdb, CDB_GROUP5);
14602 		cdb[0] = SCMD_REPORT_LUNS;
14603 		cdb[6] = (buf_len & 0xff000000) >> 24;
14604 		cdb[7] = (buf_len & 0x00ff0000) >> 16;
14605 		cdb[8] = (buf_len & 0x0000ff00) >> 8;
14606 		cdb[9] = (buf_len & 0x000000ff);
14607 
14608 		ret = mptsas_send_scsi_cmd(mpt, &ap, ptgt, &cdb[0], CDB_GROUP5,
14609 		    repluns_bp, NULL);
14610 		if (ret != DDI_SUCCESS) {
14611 			scsi_free_consistent_buf(repluns_bp);
14612 			retry++;
14613 			continue;
14614 		}
14615 		lun_list_len = BE_32(*(int *)((void *)(
14616 		    repluns_bp->b_un.b_addr)));
14617 		if (buf_len >= lun_list_len + 8) {
14618 			ret = DDI_SUCCESS;
14619 			break;
14620 		}
14621 		scsi_free_consistent_buf(repluns_bp);
14622 		buf_len = lun_list_len + 8;
14623 
14624 	} while (retry < 3);
14625 
14626 	if (ret != DDI_SUCCESS)
14627 		return (ret);
14628 	buffer = (char *)repluns_bp->b_un.b_addr;
14629 	/*
14630 	 * find out the number of luns returned by the SCSI ReportLun call
14631 	 * and allocate buffer space
14632 	 */
14633 	lun_total = lun_list_len / MPTSAS_SCSI_REPORTLUNS_ADDRESS_SIZE;
14634 	saved_repluns = kmem_zalloc(sizeof (uint16_t) * lun_total, KM_SLEEP);
14635 	if (saved_repluns == NULL) {
14636 		scsi_free_consistent_buf(repluns_bp);
14637 		return (DDI_FAILURE);
14638 	}
14639 	for (lun_cnt = 0; lun_cnt < lun_total; lun_cnt++) {
14640 		if (mptsas_retrieve_lundata(lun_cnt, (uint8_t *)(buffer),
14641 		    &lun_num, &lun_addr_type) != DDI_SUCCESS) {
14642 			continue;
14643 		}
14644 		saved_repluns[lun_cnt] = lun_num;
14645 		if ((cdip = mptsas_find_child_addr(pdip, sas_wwn, lun_num)) !=
14646 		    NULL) {
14647 			ret = DDI_SUCCESS;
14648 		} else {
14649 			ret = mptsas_probe_lun(pdip, lun_num, &cdip,
14650 			    ptgt);
14651 		}
14652 		if ((ret == DDI_SUCCESS) && (cdip != NULL)) {
14653 			(void) ndi_prop_remove(DDI_DEV_T_NONE, cdip,
14654 			    MPTSAS_DEV_GONE);
14655 		}
14656 	}
14657 	mptsas_offline_missed_luns(pdip, saved_repluns, lun_total, ptgt);
14658 	kmem_free(saved_repluns, sizeof (uint16_t) * lun_total);
14659 	scsi_free_consistent_buf(repluns_bp);
14660 	return (DDI_SUCCESS);
14661 }
14662 
14663 static int
14664 mptsas_config_raid(dev_info_t *pdip, uint16_t target, dev_info_t **dip)
14665 {
14666 	int			rval = DDI_FAILURE;
14667 	struct scsi_inquiry	*sd_inq = NULL;
14668 	mptsas_t		*mpt = DIP2MPT(pdip);
14669 	mptsas_target_t		*ptgt = NULL;
14670 
14671 	mutex_enter(&mpt->m_mutex);
14672 	ptgt = refhash_linear_search(mpt->m_targets,
14673 	    mptsas_target_eval_devhdl, &target);
14674 	mutex_exit(&mpt->m_mutex);
14675 	if (ptgt == NULL) {
14676 		mptsas_log(mpt, CE_WARN, "Volume with VolDevHandle of 0x%x "
14677 		    "not found.", target);
14678 		return (rval);
14679 	}
14680 
14681 	sd_inq = (struct scsi_inquiry *)kmem_alloc(SUN_INQSIZE, KM_SLEEP);
14682 	rval = mptsas_inquiry(mpt, ptgt, 0, 0, (uchar_t *)sd_inq,
14683 	    SUN_INQSIZE, 0, (uchar_t)0);
14684 
14685 	if ((rval == DDI_SUCCESS) && MPTSAS_VALID_LUN(sd_inq)) {
14686 		rval = mptsas_create_phys_lun(pdip, sd_inq, NULL, dip, ptgt,
14687 		    0);
14688 	} else {
14689 		rval = DDI_FAILURE;
14690 	}
14691 
14692 	kmem_free(sd_inq, SUN_INQSIZE);
14693 	return (rval);
14694 }
14695 
14696 /*
14697  * configure all RAID volumes for virtual iport
14698  */
14699 static void
14700 mptsas_config_all_viport(dev_info_t *pdip)
14701 {
14702 	mptsas_t	*mpt = DIP2MPT(pdip);
14703 	int		config, vol;
14704 	int		target;
14705 	dev_info_t	*lundip = NULL;
14706 
14707 	/*
14708 	 * Get latest RAID info and search for any Volume DevHandles.  If any
14709 	 * are found, configure the volume.
14710 	 */
14711 	mutex_enter(&mpt->m_mutex);
14712 	for (config = 0; config < mpt->m_num_raid_configs; config++) {
14713 		for (vol = 0; vol < MPTSAS_MAX_RAIDVOLS; vol++) {
14714 			if (mpt->m_raidconfig[config].m_raidvol[vol].m_israid
14715 			    == 1) {
14716 				target = mpt->m_raidconfig[config].
14717 				    m_raidvol[vol].m_raidhandle;
14718 				mutex_exit(&mpt->m_mutex);
14719 				(void) mptsas_config_raid(pdip, target,
14720 				    &lundip);
14721 				mutex_enter(&mpt->m_mutex);
14722 			}
14723 		}
14724 	}
14725 	mutex_exit(&mpt->m_mutex);
14726 }
14727 
14728 static void
14729 mptsas_offline_missed_luns(dev_info_t *pdip, uint16_t *repluns,
14730     int lun_cnt, mptsas_target_t *ptgt)
14731 {
14732 	dev_info_t	*child = NULL, *savechild = NULL;
14733 	mdi_pathinfo_t	*pip = NULL, *savepip = NULL;
14734 	uint64_t	sas_wwn, wwid;
14735 	uint8_t		phy;
14736 	int		lun;
14737 	int		i;
14738 	int		find;
14739 	char		*addr;
14740 	char		*nodename;
14741 	mptsas_t	*mpt = DIP2MPT(pdip);
14742 
14743 	mutex_enter(&mpt->m_mutex);
14744 	wwid = ptgt->m_addr.mta_wwn;
14745 	mutex_exit(&mpt->m_mutex);
14746 
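	/*
	 * Walk the child dev_info nodes and then the mdi pathinfo nodes
	 * under this iport; any LUN that belongs to this target's WWN but
	 * is absent from the freshly reported LUN list gets offlined below.
	 */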
14747 	child = ddi_get_child(pdip);
14748 	while (child) {
14749 		find = 0;
14750 		savechild = child;
14751 		child = ddi_get_next_sibling(child);
14752 
14753 		nodename = ddi_node_name(savechild);
14754 		if (strcmp(nodename, "smp") == 0) {
14755 			continue;
14756 		}
14757 
14758 		addr = ddi_get_name_addr(savechild);
14759 		if (addr == NULL) {
14760 			continue;
14761 		}
14762 
14763 		if (mptsas_parse_address(addr, &sas_wwn, &phy, &lun) !=
14764 		    DDI_SUCCESS) {
14765 			continue;
14766 		}
14767 
14768 		if (wwid == sas_wwn) {
14769 			for (i = 0; i < lun_cnt; i++) {
14770 				if (repluns[i] == lun) {
14771 					find = 1;
14772 					break;
14773 				}
14774 			}
14775 		} else {
14776 			continue;
14777 		}
14778 		if (find == 0) {
14779 			/*
			 * This LUN is no longer present on the target.
14781 			 */
14782 			(void) mptsas_offline_lun(pdip, savechild, NULL,
14783 			    NDI_DEVI_REMOVE);
14784 		}
14785 	}
14786 
14787 	pip = mdi_get_next_client_path(pdip, NULL);
14788 	while (pip) {
14789 		find = 0;
14790 		savepip = pip;
14791 		addr = MDI_PI(pip)->pi_addr;
14792 
14793 		pip = mdi_get_next_client_path(pdip, pip);
14794 
14795 		if (addr == NULL) {
14796 			continue;
14797 		}
14798 
14799 		if (mptsas_parse_address(addr, &sas_wwn, &phy,
14800 		    &lun) != DDI_SUCCESS) {
14801 			continue;
14802 		}
14803 
14804 		if (sas_wwn == wwid) {
14805 			for (i = 0; i < lun_cnt; i++) {
14806 				if (repluns[i] == lun) {
14807 					find = 1;
14808 					break;
14809 				}
14810 			}
14811 		} else {
14812 			continue;
14813 		}
14814 
14815 		if (find == 0) {
14816 			/*
			 * This LUN is no longer present on the target.
14818 			 */
14819 			(void) mptsas_offline_lun(pdip, NULL, savepip,
14820 			    NDI_DEVI_REMOVE);
14821 		}
14822 	}
14823 }
14824 
14825 /*
14826  * If this enclosure doesn't exist in the enclosure list, add it. If it does,
14827  * update it.
14828  */
14829 static void
14830 mptsas_enclosure_update(mptsas_t *mpt, mptsas_enclosure_t *mep)
14831 {
14832 	mptsas_enclosure_t *m;
14833 
14834 	ASSERT(MUTEX_HELD(&mpt->m_mutex));
14835 	m = mptsas_enc_lookup(mpt, mep->me_enchdl);
14836 	if (m != NULL) {
14837 		uint8_t *ledp;
14838 		m->me_flags = mep->me_flags;
14839 
14840 
14841 		/*
14842 		 * If the number of slots and the first slot entry in the
14843 		 * enclosure have not changed, then we don't need to do anything
14844 		 * here. Otherwise, we need to allocate a new array for the LED
14845 		 * status of the slots.
14846 		 */
14847 		if (m->me_fslot == mep->me_fslot &&
14848 		    m->me_nslots == mep->me_nslots)
14849 			return;
14850 
14851 		/*
14852 		 * If the number of slots or the first slot has changed, it's
14853 		 * not clear that we're really in a place that we can continue
14854 		 * to honor the existing flags.
14855 		 */
14856 		if (mep->me_nslots > 0) {
14857 			ledp = kmem_zalloc(sizeof (uint8_t) * mep->me_nslots,
14858 			    KM_SLEEP);
14859 		} else {
14860 			ledp = NULL;
14861 		}
14862 
14863 		if (m->me_slotleds != NULL) {
14864 			kmem_free(m->me_slotleds, sizeof (uint8_t) *
14865 			    m->me_nslots);
14866 		}
14867 		m->me_slotleds = ledp;
14868 		m->me_fslot = mep->me_fslot;
14869 		m->me_nslots = mep->me_nslots;
14870 		return;
14871 	}
14872 
14873 	m = kmem_zalloc(sizeof (*m), KM_SLEEP);
14874 	m->me_enchdl = mep->me_enchdl;
14875 	m->me_flags = mep->me_flags;
14876 	m->me_nslots = mep->me_nslots;
14877 	m->me_fslot = mep->me_fslot;
14878 	if (m->me_nslots > 0) {
14879 		m->me_slotleds = kmem_zalloc(sizeof (uint8_t) * mep->me_nslots,
14880 		    KM_SLEEP);
14881 		/*
14882 		 * It may make sense to optionally flush all of the slots and/or
14883 		 * read the slot status flag here to synchronize between
14884 		 * ourselves and the card. So far, anecdotally, that hasn't been
14885 		 * needed when enumerating something new. If we do, we
14886 		 * should kick that off in a taskq potentially.
14887 		 */
14888 	}
14889 	list_insert_tail(&mpt->m_enclosures, m);
14890 }
14891 
14892 static void
14893 mptsas_update_hashtab(struct mptsas *mpt)
14894 {
14895 	uint32_t	page_address;
14896 	int		rval = 0;
14897 	uint16_t	dev_handle;
14898 	mptsas_target_t	*ptgt = NULL;
14899 	mptsas_smp_t	smp_node;
14900 
14901 	/*
14902 	 * Get latest RAID info.
14903 	 */
14904 	(void) mptsas_get_raid_info(mpt);
14905 
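	/*
	 * Each traversal below builds the page address by OR'ing a
	 * GET_NEXT_HANDLE form value with the previous device handle in the
	 * low 16 bits, so the firmware returns the next object after that
	 * handle on every iteration.
	 */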
14906 	dev_handle = mpt->m_smp_devhdl;
14907 	while (mpt->m_done_traverse_smp == 0) {
14908 		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
14909 		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)dev_handle;
14910 		if (mptsas_get_sas_expander_page0(mpt, page_address, &smp_node)
14911 		    != DDI_SUCCESS) {
14912 			break;
14913 		}
14914 		mpt->m_smp_devhdl = dev_handle = smp_node.m_devhdl;
14915 		(void) mptsas_smp_alloc(mpt, &smp_node);
14916 	}
14917 
14918 	/*
14919 	 * Loop over enclosures so we can understand what's there.
14920 	 */
14921 	dev_handle = MPTSAS_INVALID_DEVHDL;
14922 	while (mpt->m_done_traverse_enc == 0) {
14923 		mptsas_enclosure_t me;
14924 
14925 		page_address = (MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE &
14926 		    MPI2_SAS_ENCLOS_PGAD_FORM_MASK) | (uint32_t)dev_handle;
14927 
14928 		if (mptsas_get_enclosure_page0(mpt, page_address, &me) !=
14929 		    DDI_SUCCESS) {
14930 			break;
14931 		}
14932 		dev_handle = me.me_enchdl;
14933 		mptsas_enclosure_update(mpt, &me);
14934 	}
14935 
14936 	/*
14937 	 * Config target devices
14938 	 */
14939 	dev_handle = mpt->m_dev_handle;
14940 
14941 	/*
14942 	 * Loop to get SAS device page 0 via GetNextHandle until the
14943 	 * last handle. If the SAS device is a SATA/SSP target,
14944 	 * we try to configure it.
14945 	 */
14946 	while (mpt->m_done_traverse_dev == 0) {
14947 		ptgt = NULL;
14948 		page_address =
14949 		    (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
14950 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
14951 		    (uint32_t)dev_handle;
14952 		rval = mptsas_get_target_device_info(mpt, page_address,
14953 		    &dev_handle, &ptgt);
14954 		if ((rval == DEV_INFO_FAIL_PAGE0) ||
14955 		    (rval == DEV_INFO_FAIL_ALLOC)) {
14956 			break;
14957 		}
14958 		if (rval == DEV_INFO_FAIL_GUID) {
14959 			continue;
14960 		}
14961 
14962 		mpt->m_dev_handle = dev_handle;
14963 	}
14964 
14965 }
14966 
14967 void
14968 mptsas_update_driver_data(struct mptsas *mpt)
14969 {
14970 	mptsas_target_t *tp;
14971 	mptsas_smp_t *sp;
14972 
14973 	ASSERT(MUTEX_HELD(&mpt->m_mutex));
14974 
14975 	/*
14976 	 * TODO after hard reset, update the driver data structures
14977 	 * 1. update port/phymask mapping table mpt->m_phy_info
14978 	 * 2. invalid all the entries in hash table
14979 	 *    m_devhdl = 0xffff and m_deviceinfo = 0
14980 	 * 3. call sas_device_page/expander_page to update hash table
14981 	 */
14982 	mptsas_update_phymask(mpt);
14983 
14984 	/*
14985 	 * Remove all the devhdls for existing entries but leave their
14986 	 * addresses alone.  In update_hashtab() below, we'll find all
14987 	 * targets that are still present and reassociate them with
14988 	 * their potentially new devhdls.  Leaving the targets around in
14989 	 * this fashion allows them to be used on the tx waitq even
14990 	 * while IOC reset is occurring.
14991 	 */
14992 	for (tp = refhash_first(mpt->m_targets); tp != NULL;
14993 	    tp = refhash_next(mpt->m_targets, tp)) {
14994 		tp->m_devhdl = MPTSAS_INVALID_DEVHDL;
14995 		tp->m_deviceinfo = 0;
14996 		tp->m_dr_flag = MPTSAS_DR_INACTIVE;
14997 	}
14998 	for (sp = refhash_first(mpt->m_smp_targets); sp != NULL;
14999 	    sp = refhash_next(mpt->m_smp_targets, sp)) {
15000 		sp->m_devhdl = MPTSAS_INVALID_DEVHDL;
15001 		sp->m_deviceinfo = 0;
15002 	}
15003 	mpt->m_done_traverse_dev = 0;
15004 	mpt->m_done_traverse_smp = 0;
15005 	mpt->m_done_traverse_enc = 0;
15006 	mpt->m_dev_handle = mpt->m_smp_devhdl = MPTSAS_INVALID_DEVHDL;
15007 	mptsas_update_hashtab(mpt);
15008 }
15009 
15010 static void
15011 mptsas_config_all(dev_info_t *pdip)
15012 {
15013 	dev_info_t	*smpdip = NULL;
15014 	mptsas_t	*mpt = DIP2MPT(pdip);
15015 	int		phymask = 0;
15016 	mptsas_phymask_t phy_mask;
15017 	mptsas_target_t	*ptgt = NULL;
15018 	mptsas_smp_t	*psmp;
15019 
15020 	/*
15021 	 * Get the phymask associated to the iport
15022 	 */
15023 	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
15024 	    "phymask", 0);
15025 
15026 	/*
15027 	 * Enumerate RAID volumes here (phymask == 0).
15028 	 */
15029 	if (phymask == 0) {
15030 		mptsas_config_all_viport(pdip);
15031 		return;
15032 	}
15033 
15034 	mutex_enter(&mpt->m_mutex);
15035 
15036 	if (!mpt->m_done_traverse_dev || !mpt->m_done_traverse_smp ||
15037 	    !mpt->m_done_traverse_enc) {
15038 		mptsas_update_hashtab(mpt);
15039 	}
15040 
15041 	for (psmp = refhash_first(mpt->m_smp_targets); psmp != NULL;
15042 	    psmp = refhash_next(mpt->m_smp_targets, psmp)) {
15043 		phy_mask = psmp->m_addr.mta_phymask;
15044 		if (phy_mask == phymask) {
15045 			smpdip = NULL;
15046 			mutex_exit(&mpt->m_mutex);
15047 			(void) mptsas_online_smp(pdip, psmp, &smpdip);
15048 			mutex_enter(&mpt->m_mutex);
15049 		}
15050 	}
15051 
15052 	for (ptgt = refhash_first(mpt->m_targets); ptgt != NULL;
15053 	    ptgt = refhash_next(mpt->m_targets, ptgt)) {
15054 		phy_mask = ptgt->m_addr.mta_phymask;
15055 		if (phy_mask == phymask) {
15056 			mutex_exit(&mpt->m_mutex);
15057 			(void) mptsas_config_target(pdip, ptgt);
15058 			mutex_enter(&mpt->m_mutex);
15059 		}
15060 	}
15061 	mutex_exit(&mpt->m_mutex);
15062 }
15063 
15064 static int
15065 mptsas_config_target(dev_info_t *pdip, mptsas_target_t *ptgt)
15066 {
15067 	int		rval = DDI_FAILURE;
15068 	dev_info_t	*tdip;
15069 
15070 	rval = mptsas_config_luns(pdip, ptgt);
15071 	if (rval != DDI_SUCCESS) {
15072 		/*
15073 		 * The return value means that SCMD_REPORT_LUNS
15074 		 * did not execute successfully. The target may not
15075 		 * support such a command.
15076 		 */
15077 		rval = mptsas_probe_lun(pdip, 0, &tdip, ptgt);
15078 	}
15079 	return (rval);
15080 }
15081 
15082 /*
15083  * Return failure if not all the children/paths are freed.
15084  * If there is any path under the HBA, the return value will always be
15085  * failure because we do not call mdi_pi_free() for the path.
15086  */
15087 static int
15088 mptsas_offline_target(dev_info_t *pdip, char *name)
15089 {
15090 	dev_info_t		*child = NULL, *prechild = NULL;
15091 	mdi_pathinfo_t		*pip = NULL, *savepip = NULL;
15092 	int			tmp_rval, rval = DDI_SUCCESS;
15093 	char			*addr, *cp;
15094 	size_t			s;
15095 	mptsas_t		*mpt = DIP2MPT(pdip);
15096 
15097 	child = ddi_get_child(pdip);
15098 	while (child) {
15099 		addr = ddi_get_name_addr(child);
15100 		prechild = child;
15101 		child = ddi_get_next_sibling(child);
15102 
15103 		if (addr == NULL) {
15104 			continue;
15105 		}
15106 		if ((cp = strchr(addr, ',')) == NULL) {
15107 			continue;
15108 		}
15109 
15110 		s = (uintptr_t)cp - (uintptr_t)addr;
15111 
15112 		if (strncmp(addr, name, s) != 0) {
15113 			continue;
15114 		}
15115 
15116 		tmp_rval = mptsas_offline_lun(pdip, prechild, NULL,
15117 		    NDI_DEVI_REMOVE);
15118 		if (tmp_rval != DDI_SUCCESS) {
15119 			rval = DDI_FAILURE;
15120 			if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
15121 			    prechild, MPTSAS_DEV_GONE) !=
15122 			    DDI_PROP_SUCCESS) {
15123 				mptsas_log(mpt, CE_WARN, "mptsas driver "
15124 				    "unable to create property for "
15125 				    "SAS %s (MPTSAS_DEV_GONE)", addr);
15126 			}
15127 		}
15128 	}
15129 
15130 	pip = mdi_get_next_client_path(pdip, NULL);
15131 	while (pip) {
15132 		addr = MDI_PI(pip)->pi_addr;
15133 		savepip = pip;
15134 		pip = mdi_get_next_client_path(pdip, pip);
15135 		if (addr == NULL) {
15136 			continue;
15137 		}
15138 
15139 		if ((cp = strchr(addr, ',')) == NULL) {
15140 			continue;
15141 		}
15142 
15143 		s = (uintptr_t)cp - (uintptr_t)addr;
15144 
15145 		if (strncmp(addr, name, s) != 0) {
15146 			continue;
15147 		}
15148 
15149 		(void) mptsas_offline_lun(pdip, NULL, savepip,
15150 		    NDI_DEVI_REMOVE);
15151 		/*
15152 		 * The driver does not invoke mdi_pi_free(), so the path
15153 		 * will never be freed; return DDI_FAILURE.
15154 		 */
15155 		rval = DDI_FAILURE;
15156 	}
15157 	return (rval);
15158 }
15159 
15160 static int
15161 mptsas_offline_lun(dev_info_t *pdip, dev_info_t *rdip,
15162     mdi_pathinfo_t *rpip, uint_t flags)
15163 {
15164 	int		rval = DDI_FAILURE;
15165 	char		*devname;
15166 	dev_info_t	*cdip, *parent;
15167 
15168 	if (rpip != NULL) {
15169 		parent = scsi_vhci_dip;
15170 		cdip = mdi_pi_get_client(rpip);
15171 	} else if (rdip != NULL) {
15172 		parent = pdip;
15173 		cdip = rdip;
15174 	} else {
15175 		return (DDI_FAILURE);
15176 	}
15177 
15178 	/*
15179 	 * Make sure the node is attached, otherwise
15180 	 * it won't have related cache nodes to
15181 	 * clean up.  i_ddi_devi_attached is
15182 	 * similar to i_ddi_node_state(cdip) >=
15183 	 * DS_ATTACHED.
15184 	 */
15185 	if (i_ddi_devi_attached(cdip)) {
15186 
15187 		/* Get full devname */
15188 		devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
15189 		(void) ddi_deviname(cdip, devname);
15190 		/* Clean cache */
15191 		(void) devfs_clean(parent, devname + 1,
15192 		    DV_CLEAN_FORCE);
15193 		kmem_free(devname, MAXNAMELEN + 1);
15194 	}
15195 	if (rpip != NULL) {
15196 		if (MDI_PI_IS_OFFLINE(rpip)) {
15197 			rval = DDI_SUCCESS;
15198 		} else {
15199 			rval = mdi_pi_offline(rpip, 0);
15200 		}
15201 	} else {
15202 		rval = ndi_devi_offline(cdip, flags);
15203 	}
15204 
15205 	return (rval);
15206 }
15207 
15208 static dev_info_t *
15209 mptsas_find_smp_child(dev_info_t *parent, char *str_wwn)
15210 {
15211 	dev_info_t	*child = NULL;
15212 	char		*smp_wwn = NULL;
15213 
15214 	child = ddi_get_child(parent);
15215 	while (child) {
15216 		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
15217 		    DDI_PROP_DONTPASS, SMP_WWN, &smp_wwn)
15218 		    != DDI_SUCCESS) {
15219 			child = ddi_get_next_sibling(child);
15220 			continue;
15221 		}
15222 
15223 		if (strcmp(smp_wwn, str_wwn) == 0) {
15224 			ddi_prop_free(smp_wwn);
15225 			break;
15226 		}
15227 		child = ddi_get_next_sibling(child);
15228 		ddi_prop_free(smp_wwn);
15229 	}
15230 	return (child);
15231 }
15232 
15233 static int
15234 mptsas_offline_smp(dev_info_t *pdip, mptsas_smp_t *smp_node, uint_t flags)
15235 {
15236 	int		rval = DDI_FAILURE;
15237 	char		*devname;
15238 	char		wwn_str[MPTSAS_WWN_STRLEN];
15239 	dev_info_t	*cdip;
15240 
15241 	(void) sprintf(wwn_str, "%"PRIx64, smp_node->m_addr.mta_wwn);
15242 
15243 	cdip = mptsas_find_smp_child(pdip, wwn_str);
15244 
15245 	if (cdip == NULL)
15246 		return (DDI_SUCCESS);
15247 
15248 	/*
15249 	 * Make sure the node is attached, otherwise
15250 	 * it won't have related cache nodes to
15251 	 * clean up.  i_ddi_devi_attached is
15252 	 * similar to i_ddi_node_state(cdip) >=
15253 	 * DS_ATTACHED.
15254 	 */
15255 	if (i_ddi_devi_attached(cdip)) {
15256 
15257 		/* Get full devname */
15258 		devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
15259 		(void) ddi_deviname(cdip, devname);
15260 		/* Clean cache */
15261 		(void) devfs_clean(pdip, devname + 1,
15262 		    DV_CLEAN_FORCE);
15263 		kmem_free(devname, MAXNAMELEN + 1);
15264 	}
15265 
15266 	rval = ndi_devi_offline(cdip, flags);
15267 
15268 	return (rval);
15269 }
15270 
15271 static dev_info_t *
15272 mptsas_find_child(dev_info_t *pdip, char *name)
15273 {
15274 	dev_info_t	*child = NULL;
15275 	char		*rname = NULL;
15276 	int		rval = DDI_FAILURE;
15277 
15278 	rname = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15279 
15280 	child = ddi_get_child(pdip);
15281 	while (child) {
15282 		rval = mptsas_name_child(child, rname, SCSI_MAXNAMELEN);
15283 		if (rval != DDI_SUCCESS) {
15284 			child = ddi_get_next_sibling(child);
15285 			bzero(rname, SCSI_MAXNAMELEN);
15286 			continue;
15287 		}
15288 
15289 		if (strcmp(rname, name) == 0) {
15290 			break;
15291 		}
15292 		child = ddi_get_next_sibling(child);
15293 		bzero(rname, SCSI_MAXNAMELEN);
15294 	}
15295 
15296 	kmem_free(rname, SCSI_MAXNAMELEN);
15297 
15298 	return (child);
15299 }
15300 
15301 
15302 static dev_info_t *
15303 mptsas_find_child_addr(dev_info_t *pdip, uint64_t sasaddr, int lun)
15304 {
15305 	dev_info_t	*child = NULL;
15306 	char		*name = NULL;
15307 	char		*addr = NULL;
15308 
15309 	name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15310 	addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15311 	(void) sprintf(name, "%016"PRIx64, sasaddr);
15312 	(void) sprintf(addr, "w%s,%x", name, lun);
15313 	child = mptsas_find_child(pdip, addr);
15314 	kmem_free(name, SCSI_MAXNAMELEN);
15315 	kmem_free(addr, SCSI_MAXNAMELEN);
15316 	return (child);
15317 }
15318 
15319 static dev_info_t *
15320 mptsas_find_child_phy(dev_info_t *pdip, uint8_t phy)
15321 {
15322 	dev_info_t	*child;
15323 	char		*addr;
15324 
15325 	addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15326 	(void) sprintf(addr, "p%x,0", phy);
15327 	child = mptsas_find_child(pdip, addr);
15328 	kmem_free(addr, SCSI_MAXNAMELEN);
15329 	return (child);
15330 }
15331 
15332 static mdi_pathinfo_t *
15333 mptsas_find_path_phy(dev_info_t *pdip, uint8_t phy)
15334 {
15335 	mdi_pathinfo_t	*path;
15336 	char		*addr = NULL;
15337 
15338 	addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15339 	(void) sprintf(addr, "p%x,0", phy);
15340 	path = mdi_pi_find(pdip, NULL, addr);
15341 	kmem_free(addr, SCSI_MAXNAMELEN);
15342 	return (path);
15343 }
15344 
15345 static mdi_pathinfo_t *
15346 mptsas_find_path_addr(dev_info_t *parent, uint64_t sasaddr, int lun)
15347 {
15348 	mdi_pathinfo_t	*path;
15349 	char		*name = NULL;
15350 	char		*addr = NULL;
15351 
15352 	name = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15353 	addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15354 	(void) sprintf(name, "%016"PRIx64, sasaddr);
15355 	(void) sprintf(addr, "w%s,%x", name, lun);
15356 	path = mdi_pi_find(parent, NULL, addr);
15357 	kmem_free(name, SCSI_MAXNAMELEN);
15358 	kmem_free(addr, SCSI_MAXNAMELEN);
15359 
15360 	return (path);
15361 }
15362 
15363 static int
15364 mptsas_create_lun(dev_info_t *pdip, struct scsi_inquiry *sd_inq,
15365     dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
15366 {
15367 	int			i = 0;
15368 	uchar_t			*inq83 = NULL;
15369 	int			inq83_len1 = 0xFF;
15370 	int			inq83_len = 0;
15371 	int			rval = DDI_FAILURE;
15372 	ddi_devid_t		devid;
15373 	char			*guid = NULL;
15374 	int			target = ptgt->m_devhdl;
15375 	mdi_pathinfo_t		*pip = NULL;
15376 	mptsas_t		*mpt = DIP2MPT(pdip);
15377 
15378 	/*
15379 	 * For DVD/CD-ROM, optical, and enclosure services
15380 	 * devices, we won't try to enumerate them under
15381 	 * scsi_vhci, so there is no need to try page 0x83.
15382 	 */
15383 	if (sd_inq && (sd_inq->inq_dtype == DTYPE_RODIRECT ||
15384 	    sd_inq->inq_dtype == DTYPE_OPTICAL ||
15385 	    sd_inq->inq_dtype == DTYPE_ESI))
15386 		goto create_lun;
15387 
15388 	/*
15389 	 * The LCA returns good SCSI status but corrupt page 0x83 data the
15390 	 * first time it is queried. The solution is to keep requesting page
15391 	 * 0x83 and verifying the GUID is well formed (not DDI_NOT_WELL_FORMED)
15392 	 * for up to mptsas_inq83_retry_timeout seconds. If the timeout
15393 	 * expires, the driver gives up on the VPD page and fails enumeration.
15394 	 */
15395 
15396 	inq83	= kmem_zalloc(inq83_len1, KM_SLEEP);
15397 
15398 	for (i = 0; i < mptsas_inq83_retry_timeout; i++) {
15399 		rval = mptsas_inquiry(mpt, ptgt, lun, 0x83, inq83,
15400 		    inq83_len1, &inq83_len, 1);
15401 		if (rval != 0) {
15402 			mptsas_log(mpt, CE_WARN, "!mptsas request inquiry page "
15403 			    "0x83 for target:%x, lun:%x failed!", target, lun);
15404 			if (mptsas_physical_bind_failed_page_83 != B_FALSE)
15405 				goto create_lun;
15406 			goto out;
15407 		}
15408 		/*
15409 		 * create DEVID from inquiry data
15410 		 */
15411 		if ((rval = ddi_devid_scsi_encode(
15412 		    DEVID_SCSI_ENCODE_VERSION_LATEST, NULL, (uchar_t *)sd_inq,
15413 		    sizeof (struct scsi_inquiry), NULL, 0, inq83,
15414 		    (size_t)inq83_len, &devid)) == DDI_SUCCESS) {
15415 			/*
15416 			 * extract GUID from DEVID
15417 			 */
15418 			guid = ddi_devid_to_guid(devid);
15419 
15420 			/*
			 * Do not enable MPxIO if strlen(guid) is greater
			 * than MPTSAS_MAX_GUID_LEN; that constraint is
			 * handled by the framework later.
15424 			 */
15425 			if (guid && (strlen(guid) > MPTSAS_MAX_GUID_LEN)) {
15426 				ddi_devid_free_guid(guid);
15427 				guid = NULL;
15428 				if (mpt->m_mpxio_enable == TRUE) {
15429 					mptsas_log(mpt, CE_NOTE, "!Target:%x, "
15430 					    "lun:%x doesn't have a valid GUID, "
15431 					    "multipathing for this drive is "
15432 					    "not enabled", target, lun);
15433 				}
15434 			}
15435 
15436 			/*
15437 			 * devid no longer needed
15438 			 */
15439 			ddi_devid_free(devid);
15440 			break;
15441 		} else if (rval == DDI_NOT_WELL_FORMED) {
15442 			/*
			 * A return value of DDI_NOT_WELL_FORMED from
			 * ddi_devid_scsi_encode() means the data may become
			 * valid later, so it is worth retrying inquiry page
			 * 0x83 to get the GUID.
15446 			 */
15447 			NDBG20(("Not well formed devid, retry..."));
15448 			delay(1 * drv_usectohz(1000000));
15449 			continue;
15450 		} else {
15451 			mptsas_log(mpt, CE_WARN, "!Encode devid failed for "
15452 			    "path target:%x, lun:%x", target, lun);
15453 			rval = DDI_FAILURE;
15454 			goto create_lun;
15455 		}
15456 	}
15457 
15458 	if (i == mptsas_inq83_retry_timeout) {
15459 		mptsas_log(mpt, CE_WARN, "!Repeated page83 requests timeout "
15460 		    "for path target:%x, lun:%x", target, lun);
15461 	}
15462 
15463 	rval = DDI_FAILURE;
15464 
15465 create_lun:
15466 	if ((guid != NULL) && (mpt->m_mpxio_enable == TRUE)) {
15467 		rval = mptsas_create_virt_lun(pdip, sd_inq, guid, lun_dip, &pip,
15468 		    ptgt, lun);
15469 	}
15470 	if (rval != DDI_SUCCESS) {
15471 		rval = mptsas_create_phys_lun(pdip, sd_inq, guid, lun_dip,
15472 		    ptgt, lun);
15473 
15474 	}
15475 out:
15476 	if (guid != NULL) {
15477 		/*
15478 		 * guid no longer needed
15479 		 */
15480 		ddi_devid_free_guid(guid);
15481 	}
15482 	if (inq83 != NULL)
15483 		kmem_free(inq83, inq83_len1);
15484 	return (rval);
15485 }
15486 
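/*
 * mptsas_create_virt_lun() enumerates a LUN as an mdi pathinfo node under
 * scsi_vhci.  If a pathinfo for this address already exists, it is simply
 * brought back online (or freed and recreated if the GUID changed).
 * Otherwise a new pathinfo is allocated and decorated with the properties
 * the framework and MPAPI expect (client-guid, lun/lun64, compatible,
 * target-port or sata-phy, attached-port, bridge-port for SATA, obp-path,
 * pm-capable, phy-num) before being onlined.
 */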
15487 static int
mptsas_create_virt_lun(dev_info_t *pdip, struct scsi_inquiry *inq, char *guid,
15489     dev_info_t **lun_dip, mdi_pathinfo_t **pip, mptsas_target_t *ptgt, int lun)
15490 {
15491 	int			target;
15492 	char			*nodename = NULL;
15493 	char			**compatible = NULL;
15494 	int			ncompatible	= 0;
15495 	int			mdi_rtn = MDI_FAILURE;
15496 	int			rval = DDI_FAILURE;
15497 	char			*old_guid = NULL;
15498 	mptsas_t		*mpt = DIP2MPT(pdip);
15499 	char			*lun_addr = NULL;
15500 	char			*wwn_str = NULL;
15501 	char			*attached_wwn_str = NULL;
15502 	char			*component = NULL;
15503 	uint8_t			phy = 0xFF;
15504 	uint64_t		sas_wwn;
15505 	int64_t			lun64 = 0;
15506 	uint32_t		devinfo;
15507 	uint16_t		dev_hdl;
15508 	uint16_t		pdev_hdl;
15509 	uint64_t		dev_sas_wwn;
15510 	uint64_t		pdev_sas_wwn;
15511 	uint32_t		pdev_info;
15512 	uint8_t			physport;
15513 	uint8_t			phy_id;
15514 	uint32_t		page_address;
15515 	uint16_t		bay_num, enclosure, io_flags;
15516 	char			pdev_wwn_str[MPTSAS_WWN_STRLEN];
15517 	uint32_t		dev_info;
15518 
15519 	mutex_enter(&mpt->m_mutex);
15520 	target = ptgt->m_devhdl;
15521 	sas_wwn = ptgt->m_addr.mta_wwn;
15522 	devinfo = ptgt->m_deviceinfo;
15523 	phy = ptgt->m_phynum;
15524 	mutex_exit(&mpt->m_mutex);
15525 
15526 	if (sas_wwn) {
15527 		*pip = mptsas_find_path_addr(pdip, sas_wwn, lun);
15528 	} else {
15529 		*pip = mptsas_find_path_phy(pdip, phy);
15530 	}
15531 
15532 	if (*pip != NULL) {
15533 		*lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
15534 		ASSERT(*lun_dip != NULL);
15535 		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, *lun_dip,
15536 		    (DDI_PROP_DONTPASS | DDI_PROP_NOTPROM),
15537 		    MDI_CLIENT_GUID_PROP, &old_guid) == DDI_SUCCESS) {
15538 			if (strncmp(guid, old_guid, strlen(guid)) == 0) {
15539 				/*
15540 				 * Same path back online again.
15541 				 */
15542 				(void) ddi_prop_free(old_guid);
15543 				if ((!MDI_PI_IS_ONLINE(*pip)) &&
15544 				    (!MDI_PI_IS_STANDBY(*pip)) &&
15545 				    (ptgt->m_tgt_unconfigured == 0)) {
15546 					rval = mdi_pi_online(*pip, 0);
15547 				} else {
15548 					rval = DDI_SUCCESS;
15549 				}
15550 				if (rval != DDI_SUCCESS) {
15551 					mptsas_log(mpt, CE_WARN, "path:target: "
15552 					    "%x, lun:%x online failed!", target,
15553 					    lun);
15554 					*pip = NULL;
15555 					*lun_dip = NULL;
15556 				}
15557 				return (rval);
15558 			} else {
15559 				/*
				 * The GUID of the LUN has changed, perhaps
				 * because another volume was mapped to the
				 * same LUN.
15563 				 */
15564 				mptsas_log(mpt, CE_WARN, "The GUID of the "
15565 				    "target:%x, lun:%x was changed, maybe "
15566 				    "because someone mapped another volume "
15567 				    "to the same LUN", target, lun);
15568 				(void) ddi_prop_free(old_guid);
15569 				if (!MDI_PI_IS_OFFLINE(*pip)) {
15570 					rval = mdi_pi_offline(*pip, 0);
15571 					if (rval != MDI_SUCCESS) {
15572 						mptsas_log(mpt, CE_WARN, "path:"
15573 						    "target:%x, lun:%x offline "
15574 						    "failed!", target, lun);
15575 						*pip = NULL;
15576 						*lun_dip = NULL;
15577 						return (DDI_FAILURE);
15578 					}
15579 				}
15580 				if (mdi_pi_free(*pip, 0) != MDI_SUCCESS) {
15581 					mptsas_log(mpt, CE_WARN, "path:target:"
15582 					    "%x, lun:%x free failed!", target,
15583 					    lun);
15584 					*pip = NULL;
15585 					*lun_dip = NULL;
15586 					return (DDI_FAILURE);
15587 				}
15588 			}
15589 		} else {
15590 			mptsas_log(mpt, CE_WARN, "Can't get client-guid "
15591 			    "property for path:target:%x, lun:%x", target, lun);
15592 			*pip = NULL;
15593 			*lun_dip = NULL;
15594 			return (DDI_FAILURE);
15595 		}
15596 	}
15597 	scsi_hba_nodename_compatible_get(inq, NULL,
15598 	    inq->inq_dtype, NULL, &nodename, &compatible, &ncompatible);
15599 
15600 	/*
15601 	 * if nodename can't be determined then print a message and skip it
15602 	 */
15603 	if (nodename == NULL) {
15604 		mptsas_log(mpt, CE_WARN, "mptsas driver found no compatible "
15605 		    "driver for target%d lun %d dtype:0x%02x", target, lun,
15606 		    inq->inq_dtype);
15607 		return (DDI_FAILURE);
15608 	}
15609 
15610 	wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
15611 	/* The property is needed by MPAPI */
15612 	(void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
15613 
15614 	lun_addr = kmem_zalloc(SCSI_MAXNAMELEN, KM_SLEEP);
15615 	if (guid) {
15616 		(void) sprintf(lun_addr, "w%s,%x", wwn_str, lun);
15617 		(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15618 	} else {
15619 		(void) sprintf(lun_addr, "p%x,%x", phy, lun);
15620 		(void) sprintf(wwn_str, "p%x", phy);
15621 	}
15622 
15623 	mdi_rtn = mdi_pi_alloc_compatible(pdip, nodename,
15624 	    guid, lun_addr, compatible, ncompatible,
15625 	    0, pip);
15626 	if (mdi_rtn == MDI_SUCCESS) {
15627 
15628 		if (mdi_prop_update_string(*pip, MDI_GUID,
15629 		    guid) != DDI_SUCCESS) {
15630 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15631 			    "create prop for target %d lun %d (MDI_GUID)",
15632 			    target, lun);
15633 			mdi_rtn = MDI_FAILURE;
15634 			goto virt_create_done;
15635 		}
15636 
15637 		if (mdi_prop_update_int(*pip, LUN_PROP,
15638 		    lun) != DDI_SUCCESS) {
15639 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15640 			    "create prop for target %d lun %d (LUN_PROP)",
15641 			    target, lun);
15642 			mdi_rtn = MDI_FAILURE;
15643 			goto virt_create_done;
15644 		}
15645 		lun64 = (int64_t)lun;
15646 		if (mdi_prop_update_int64(*pip, LUN64_PROP,
15647 		    lun64) != DDI_SUCCESS) {
15648 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15649 			    "create prop for target %d (LUN64_PROP)",
15650 			    target);
15651 			mdi_rtn = MDI_FAILURE;
15652 			goto virt_create_done;
15653 		}
15654 		if (mdi_prop_update_string_array(*pip, "compatible",
15655 		    compatible, ncompatible) !=
15656 		    DDI_PROP_SUCCESS) {
15657 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15658 			    "create prop for target %d lun %d (COMPATIBLE)",
15659 			    target, lun);
15660 			mdi_rtn = MDI_FAILURE;
15661 			goto virt_create_done;
15662 		}
15663 		if (sas_wwn && (mdi_prop_update_string(*pip,
15664 		    SCSI_ADDR_PROP_TARGET_PORT, wwn_str) != DDI_PROP_SUCCESS)) {
15665 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15666 			    "create prop for target %d lun %d "
15667 			    "(target-port)", target, lun);
15668 			mdi_rtn = MDI_FAILURE;
15669 			goto virt_create_done;
15670 		} else if ((sas_wwn == 0) && (mdi_prop_update_int(*pip,
15671 		    "sata-phy", phy) != DDI_PROP_SUCCESS)) {
15672 			/*
15673 			 * Direct attached SATA device without DeviceName
15674 			 */
15675 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15676 			    "create prop for SAS target %d lun %d "
15677 			    "(sata-phy)", target, lun);
15678 			mdi_rtn = MDI_FAILURE;
15679 			goto virt_create_done;
15680 		}
15681 		mutex_enter(&mpt->m_mutex);
15682 
15683 		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15684 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
15685 		    (uint32_t)ptgt->m_devhdl;
15686 		rval = mptsas_get_sas_device_page0(mpt, page_address,
15687 		    &dev_hdl, &dev_sas_wwn, &dev_info, &physport,
15688 		    &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
15689 		if (rval != DDI_SUCCESS) {
15690 			mutex_exit(&mpt->m_mutex);
15691 			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
15692 			    "parent device for handle %d", page_address);
15693 			mdi_rtn = MDI_FAILURE;
15694 			goto virt_create_done;
15695 		}
15696 
15697 		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
15698 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
15699 		rval = mptsas_get_sas_device_page0(mpt, page_address,
15700 		    &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
15701 		    &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
15702 		if (rval != DDI_SUCCESS) {
15703 			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
15705 			    "device info for handle %d", page_address);
15706 			mdi_rtn = MDI_FAILURE;
15707 			goto virt_create_done;
15708 		}
15709 
15710 		mutex_exit(&mpt->m_mutex);
15711 
15712 		/*
		 * If this device is directly attached to the controller,
		 * set the attached-port property to the HBA's base WWID.
15715 		 */
15716 		if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
15717 		    != DEVINFO_DIRECT_ATTACHED) {
15718 			(void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15719 			    pdev_sas_wwn);
15720 		} else {
15721 			/*
			 * Update the iport's attached-port property to this
			 * device's address.
15723 			 */
15724 			if (sas_wwn == 0) {
15725 				(void) sprintf(wwn_str, "p%x", phy);
15726 			} else {
15727 				(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15728 			}
15729 			if (ddi_prop_update_string(DDI_DEV_T_NONE,
15730 			    pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
15731 			    DDI_PROP_SUCCESS) {
15732 				mptsas_log(mpt, CE_WARN,
15733 				    "mptsas unable to create "
15734 				    "property for iport target-port"
15735 				    " %s (sas_wwn)",
15736 				    wwn_str);
15737 				mdi_rtn = MDI_FAILURE;
15738 				goto virt_create_done;
15739 			}
15740 
15741 			(void) sprintf(pdev_wwn_str, "w%016"PRIx64,
15742 			    mpt->un.m_base_wwid);
15743 		}
15744 
15745 		if (IS_SATA_DEVICE(ptgt->m_deviceinfo)) {
15746 			char	uabuf[SCSI_WWN_BUFLEN];
15747 
15748 			if (scsi_wwn_to_wwnstr(dev_sas_wwn, 1, uabuf) == NULL) {
15749 				mptsas_log(mpt, CE_WARN,
15750 				    "mptsas unable to format SATA bridge WWN");
15751 				mdi_rtn = MDI_FAILURE;
15752 				goto virt_create_done;
15753 			}
15754 
15755 			if (mdi_prop_update_string(*pip,
15756 			    SCSI_ADDR_PROP_BRIDGE_PORT, uabuf) !=
15757 			    DDI_SUCCESS) {
15758 				mptsas_log(mpt, CE_WARN,
15759 				    "mptsas unable to create SCSI bridge port "
15760 				    "property for SATA device");
15761 				mdi_rtn = MDI_FAILURE;
15762 				goto virt_create_done;
15763 			}
15764 		}
15765 
15766 		if (mdi_prop_update_string(*pip,
15767 		    SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
15768 		    DDI_PROP_SUCCESS) {
15769 			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15770 			    "property for iport attached-port %s (sas_wwn)",
			    pdev_wwn_str);
15772 			mdi_rtn = MDI_FAILURE;
15773 			goto virt_create_done;
15774 		}
15775 
15776 
15777 		if (inq->inq_dtype == 0) {
15778 			component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
15779 			/*
15780 			 * set obp path for pathinfo
15781 			 */
15782 			(void) snprintf(component, MAXPATHLEN,
15783 			    "disk@%s", lun_addr);
15784 
15785 			if (mdi_pi_pathname_obp_set(*pip, component) !=
15786 			    DDI_SUCCESS) {
15787 				mptsas_log(mpt, CE_WARN, "mpt_sas driver "
15788 				    "unable to set obp-path for object %s",
15789 				    component);
15790 				mdi_rtn = MDI_FAILURE;
15791 				goto virt_create_done;
15792 			}
15793 		}
15794 
15795 		*lun_dip = MDI_PI(*pip)->pi_client->ct_dip;
15796 		if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
15797 		    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
15798 			if ((ndi_prop_update_int(DDI_DEV_T_NONE, *lun_dip,
15799 			    "pm-capable", 1)) !=
15800 			    DDI_PROP_SUCCESS) {
				mptsas_log(mpt, CE_WARN, "mptsas driver "
15802 				    "failed to create pm-capable "
15803 				    "property, target %d", target);
15804 				mdi_rtn = MDI_FAILURE;
15805 				goto virt_create_done;
15806 			}
15807 		}
15808 		/*
15809 		 * Create the phy-num property
15810 		 */
15811 		if (mdi_prop_update_int(*pip, "phy-num",
15812 		    ptgt->m_phynum) != DDI_SUCCESS) {
15813 			mptsas_log(mpt, CE_WARN, "mptsas driver unable to "
15814 			    "create phy-num property for target %d lun %d",
15815 			    target, lun);
15816 			mdi_rtn = MDI_FAILURE;
15817 			goto virt_create_done;
15818 		}
15819 		NDBG20(("new path:%s onlining,", MDI_PI(*pip)->pi_addr));
15820 		mdi_rtn = mdi_pi_online(*pip, 0);
15821 		if (mdi_rtn == MDI_NOT_SUPPORTED) {
15822 			mdi_rtn = MDI_FAILURE;
15823 		}
15824 virt_create_done:
15825 		if (*pip && mdi_rtn != MDI_SUCCESS) {
15826 			(void) mdi_pi_free(*pip, 0);
15827 			*pip = NULL;
15828 			*lun_dip = NULL;
15829 		}
15830 	}
15831 
15832 	scsi_hba_nodename_compatible_free(nodename, compatible);
15833 	if (lun_addr != NULL) {
15834 		kmem_free(lun_addr, SCSI_MAXNAMELEN);
15835 	}
15836 	if (wwn_str != NULL) {
15837 		kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
15838 	}
15839 	if (component != NULL) {
15840 		kmem_free(component, MAXPATHLEN);
15841 	}
15842 
15843 	return ((mdi_rtn == MDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
15844 }
15845 
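/*
 * mptsas_create_phys_lun() is the non-MPxIO path: it allocates an ordinary
 * child devinfo node under the iport and sets essentially the same set of
 * properties as the virtual-LUN path (lun/lun64, compatible, target-port,
 * port-wwn or sata-phy, attached-port, variant, obp-path, pm-capable,
 * phy-num), then onlines the node with ndi_devi_online().
 */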
15846 static int
mptsas_create_phys_lun(dev_info_t *pdip, struct scsi_inquiry *inq,
15848     char *guid, dev_info_t **lun_dip, mptsas_target_t *ptgt, int lun)
15849 {
15850 	int			target;
15851 	int			rval;
15852 	int			ndi_rtn = NDI_FAILURE;
15853 	uint64_t		be_sas_wwn;
15854 	char			*nodename = NULL;
15855 	char			**compatible = NULL;
15856 	int			ncompatible = 0;
15857 	int			instance = 0;
15858 	mptsas_t		*mpt = DIP2MPT(pdip);
15859 	char			*wwn_str = NULL;
15860 	char			*component = NULL;
15861 	char			*attached_wwn_str = NULL;
15862 	uint8_t			phy = 0xFF;
15863 	uint64_t		sas_wwn;
15864 	uint32_t		devinfo;
15865 	uint16_t		dev_hdl;
15866 	uint16_t		pdev_hdl;
15867 	uint64_t		pdev_sas_wwn;
15868 	uint64_t		dev_sas_wwn;
15869 	uint32_t		pdev_info;
15870 	uint8_t			physport;
15871 	uint8_t			phy_id;
15872 	uint32_t		page_address;
15873 	uint16_t		bay_num, enclosure, io_flags;
15874 	char			pdev_wwn_str[MPTSAS_WWN_STRLEN];
15875 	uint32_t		dev_info;
15876 	int64_t			lun64 = 0;
15877 
15878 	mutex_enter(&mpt->m_mutex);
15879 	target = ptgt->m_devhdl;
15880 	sas_wwn = ptgt->m_addr.mta_wwn;
15881 	devinfo = ptgt->m_deviceinfo;
15882 	phy = ptgt->m_phynum;
15883 	mutex_exit(&mpt->m_mutex);
15884 
15885 	/*
15886 	 * generate compatible property with binding-set "mpt"
15887 	 */
15888 	scsi_hba_nodename_compatible_get(inq, NULL, inq->inq_dtype, NULL,
15889 	    &nodename, &compatible, &ncompatible);
15890 
15891 	/*
15892 	 * if nodename can't be determined then print a message and skip it
15893 	 */
15894 	if (nodename == NULL) {
15895 		mptsas_log(mpt, CE_WARN, "mptsas found no compatible driver "
15896 		    "for target %d lun %d", target, lun);
15897 		return (DDI_FAILURE);
15898 	}
15899 
15900 	ndi_rtn = ndi_devi_alloc(pdip, nodename,
15901 	    DEVI_SID_NODEID, lun_dip);
15902 
15903 	/*
	 * if the lun node was allocated successfully, set its props
15905 	 */
15906 	if (ndi_rtn == NDI_SUCCESS) {
15907 
15908 		if (ndi_prop_update_int(DDI_DEV_T_NONE,
15909 		    *lun_dip, LUN_PROP, lun) !=
15910 		    DDI_PROP_SUCCESS) {
15911 			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15912 			    "property for target %d lun %d (LUN_PROP)",
15913 			    target, lun);
15914 			ndi_rtn = NDI_FAILURE;
15915 			goto phys_create_done;
15916 		}
15917 
15918 		lun64 = (int64_t)lun;
15919 		if (ndi_prop_update_int64(DDI_DEV_T_NONE,
15920 		    *lun_dip, LUN64_PROP, lun64) !=
15921 		    DDI_PROP_SUCCESS) {
15922 			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15923 			    "property for target %d lun64 %d (LUN64_PROP)",
15924 			    target, lun);
15925 			ndi_rtn = NDI_FAILURE;
15926 			goto phys_create_done;
15927 		}
15928 		if (ndi_prop_update_string_array(DDI_DEV_T_NONE,
15929 		    *lun_dip, "compatible", compatible, ncompatible)
15930 		    != DDI_PROP_SUCCESS) {
15931 			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
15932 			    "property for target %d lun %d (COMPATIBLE)",
15933 			    target, lun);
15934 			ndi_rtn = NDI_FAILURE;
15935 			goto phys_create_done;
15936 		}
15937 
15938 		/*
15939 		 * We need the SAS WWN for non-multipath devices, so
15940 		 * we'll use the same property as that multipathing
15941 		 * devices need to present for MPAPI. If we don't have
15942 		 * a WWN (e.g. parallel SCSI), don't create the prop.
15943 		 */
15944 		wwn_str = kmem_zalloc(MPTSAS_WWN_STRLEN, KM_SLEEP);
15945 		(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
15946 		if (sas_wwn && ndi_prop_update_string(DDI_DEV_T_NONE,
15947 		    *lun_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str)
15948 		    != DDI_PROP_SUCCESS) {
15949 			mptsas_log(mpt, CE_WARN, "mptsas unable to "
15950 			    "create property for SAS target %d lun %d "
15951 			    "(target-port)", target, lun);
15952 			ndi_rtn = NDI_FAILURE;
15953 			goto phys_create_done;
15954 		}
15955 
15956 		be_sas_wwn = BE_64(sas_wwn);
15957 		if (sas_wwn && ndi_prop_update_byte_array(
15958 		    DDI_DEV_T_NONE, *lun_dip, "port-wwn",
15959 		    (uchar_t *)&be_sas_wwn, 8) != DDI_PROP_SUCCESS) {
15960 			mptsas_log(mpt, CE_WARN, "mptsas unable to "
15961 			    "create property for SAS target %d lun %d "
15962 			    "(port-wwn)", target, lun);
15963 			ndi_rtn = NDI_FAILURE;
15964 			goto phys_create_done;
15965 		} else if ((sas_wwn == 0) && (ndi_prop_update_int(
15966 		    DDI_DEV_T_NONE, *lun_dip, "sata-phy", phy) !=
15967 		    DDI_PROP_SUCCESS)) {
15968 			/*
15969 			 * Direct attached SATA device without DeviceName
15970 			 */
15971 			mptsas_log(mpt, CE_WARN, "mptsas unable to "
15972 			    "create property for SAS target %d lun %d "
15973 			    "(sata-phy)", target, lun);
15974 			ndi_rtn = NDI_FAILURE;
15975 			goto phys_create_done;
15976 		}
15977 
15978 		if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
15979 		    *lun_dip, SAS_PROP) != DDI_PROP_SUCCESS) {
			mptsas_log(mpt, CE_WARN, "mptsas unable to "
15981 			    "create property for SAS target %d lun %d"
15982 			    " (SAS_PROP)", target, lun);
15983 			ndi_rtn = NDI_FAILURE;
15984 			goto phys_create_done;
15985 		}
15986 		if (guid && (ndi_prop_update_string(DDI_DEV_T_NONE,
15987 		    *lun_dip, NDI_GUID, guid) != DDI_SUCCESS)) {
15988 			mptsas_log(mpt, CE_WARN, "mptsas unable "
15989 			    "to create guid property for target %d "
15990 			    "lun %d", target, lun);
15991 			ndi_rtn = NDI_FAILURE;
15992 			goto phys_create_done;
15993 		}
15994 
15995 		/*
		 * The following code sets properties for SM-HBA support;
		 * it does not apply to RAID volumes.
15998 		 */
15999 		if (ptgt->m_addr.mta_phymask == 0)
16000 			goto phys_raid_lun;
16001 
16002 		mutex_enter(&mpt->m_mutex);
16003 
16004 		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
16005 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
16006 		    (uint32_t)ptgt->m_devhdl;
16007 		rval = mptsas_get_sas_device_page0(mpt, page_address,
16008 		    &dev_hdl, &dev_sas_wwn, &dev_info,
16009 		    &physport, &phy_id, &pdev_hdl,
16010 		    &bay_num, &enclosure, &io_flags);
16011 		if (rval != DDI_SUCCESS) {
16012 			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
16014 			    "parent device for handle %d.", page_address);
16015 			ndi_rtn = NDI_FAILURE;
16016 			goto phys_create_done;
16017 		}
16018 
16019 		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
16020 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)pdev_hdl;
16021 		rval = mptsas_get_sas_device_page0(mpt, page_address,
16022 		    &dev_hdl, &pdev_sas_wwn, &pdev_info, &physport,
16023 		    &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
16024 		if (rval != DDI_SUCCESS) {
16025 			mutex_exit(&mpt->m_mutex);
			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
			    "device info for handle %d.", page_address);
16028 			ndi_rtn = NDI_FAILURE;
16029 			goto phys_create_done;
16030 		}
16031 
16032 		mutex_exit(&mpt->m_mutex);
16033 
16034 		/*
		 * If this device is directly attached to the controller,
		 * set the attached-port property to the HBA's base WWID.
16037 		 */
16038 		if ((ptgt->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
16039 		    != DEVINFO_DIRECT_ATTACHED) {
16040 			(void) sprintf(pdev_wwn_str, "w%016"PRIx64,
16041 			    pdev_sas_wwn);
16042 		} else {
16043 			/*
			 * Update the iport's attached-port property to this
			 * device's address.
16045 			 */
16046 			if (sas_wwn == 0) {
16047 				(void) sprintf(wwn_str, "p%x", phy);
16048 			} else {
16049 				(void) sprintf(wwn_str, "w%016"PRIx64, sas_wwn);
16050 			}
16051 			if (ddi_prop_update_string(DDI_DEV_T_NONE,
16052 			    pdip, SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
16053 			    DDI_PROP_SUCCESS) {
16054 				mptsas_log(mpt, CE_WARN,
16055 				    "mptsas unable to create "
16056 				    "property for iport target-port"
16057 				    " %s (sas_wwn)",
16058 				    wwn_str);
16059 				ndi_rtn = NDI_FAILURE;
16060 				goto phys_create_done;
16061 			}
16062 
16063 			(void) sprintf(pdev_wwn_str, "w%016"PRIx64,
16064 			    mpt->un.m_base_wwid);
16065 		}
16066 
16067 		if (ndi_prop_update_string(DDI_DEV_T_NONE,
16068 		    *lun_dip, SCSI_ADDR_PROP_ATTACHED_PORT, pdev_wwn_str) !=
16069 		    DDI_PROP_SUCCESS) {
16070 			mptsas_log(mpt, CE_WARN,
16071 			    "mptsas unable to create "
16072 			    "property for iport attached-port %s (sas_wwn)",
			    pdev_wwn_str);
16074 			ndi_rtn = NDI_FAILURE;
16075 			goto phys_create_done;
16076 		}
16077 
16078 		if (IS_SATA_DEVICE(dev_info)) {
16079 			char	uabuf[SCSI_WWN_BUFLEN];
16080 
16081 			if (ndi_prop_update_string(DDI_DEV_T_NONE,
16082 			    *lun_dip, MPTSAS_VARIANT, "sata") !=
16083 			    DDI_PROP_SUCCESS) {
16084 				mptsas_log(mpt, CE_WARN,
16085 				    "mptsas unable to create "
16086 				    "property for device variant ");
16087 				ndi_rtn = NDI_FAILURE;
16088 				goto phys_create_done;
16089 			}
16090 
16091 			if (scsi_wwn_to_wwnstr(dev_sas_wwn, 1, uabuf) == NULL) {
16092 				mptsas_log(mpt, CE_WARN,
16093 				    "mptsas unable to format SATA bridge WWN");
16094 				ndi_rtn = NDI_FAILURE;
16095 				goto phys_create_done;
16096 			}
16097 
16098 			if (ndi_prop_update_string(DDI_DEV_T_NONE, *lun_dip,
16099 			    SCSI_ADDR_PROP_BRIDGE_PORT, uabuf) !=
16100 			    DDI_PROP_SUCCESS) {
16101 				mptsas_log(mpt, CE_WARN,
16102 				    "mptsas unable to create SCSI bridge port "
16103 				    "property for SATA device");
16104 				ndi_rtn = NDI_FAILURE;
16105 				goto phys_create_done;
16106 			}
16107 		}
16108 
16109 		if (IS_ATAPI_DEVICE(dev_info)) {
16110 			if (ndi_prop_update_string(DDI_DEV_T_NONE,
16111 			    *lun_dip, MPTSAS_VARIANT, "atapi") !=
16112 			    DDI_PROP_SUCCESS) {
16113 				mptsas_log(mpt, CE_WARN,
16114 				    "mptsas unable to create "
16115 				    "property for device variant ");
16116 				ndi_rtn = NDI_FAILURE;
16117 				goto phys_create_done;
16118 			}
16119 		}
16120 
16121 phys_raid_lun:
16122 		/*
		 * if this is a SAS controller and the target is a SATA
		 * drive, set the 'pm-capable' property for sd; on an OPL
		 * platform, also check whether this is an ATAPI device.
16127 		 */
16128 		instance = ddi_get_instance(mpt->m_dip);
16129 		if (devinfo & (MPI2_SAS_DEVICE_INFO_SATA_DEVICE |
16130 		    MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE)) {
16131 			NDBG2(("mptsas%d: creating pm-capable property, "
16132 			    "target %d", instance, target));
16133 
16134 			if ((ndi_prop_update_int(DDI_DEV_T_NONE,
16135 			    *lun_dip, "pm-capable", 1)) !=
16136 			    DDI_PROP_SUCCESS) {
16137 				mptsas_log(mpt, CE_WARN, "mptsas "
16138 				    "failed to create pm-capable "
16139 				    "property, target %d", target);
16140 				ndi_rtn = NDI_FAILURE;
16141 				goto phys_create_done;
16142 			}
16143 
16144 		}
16145 
16146 		if ((inq->inq_dtype == 0) || (inq->inq_dtype == 5)) {
16147 			/*
16148 			 * add 'obp-path' properties for devinfo
16149 			 */
			bzero(wwn_str, MPTSAS_WWN_STRLEN);
16151 			(void) sprintf(wwn_str, "%016"PRIx64, sas_wwn);
16152 			component = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
16153 			if (guid) {
16154 				(void) snprintf(component, MAXPATHLEN,
16155 				    "disk@w%s,%x", wwn_str, lun);
16156 			} else {
16157 				(void) snprintf(component, MAXPATHLEN,
16158 				    "disk@p%x,%x", phy, lun);
16159 			}
16160 			if (ddi_pathname_obp_set(*lun_dip, component)
16161 			    != DDI_SUCCESS) {
16162 				mptsas_log(mpt, CE_WARN, "mpt_sas driver "
16163 				    "unable to set obp-path for SAS "
16164 				    "object %s", component);
16165 				ndi_rtn = NDI_FAILURE;
16166 				goto phys_create_done;
16167 			}
16168 		}
16169 		/*
16170 		 * Create the phy-num property for non-raid disk
16171 		 */
16172 		if (ptgt->m_addr.mta_phymask != 0) {
16173 			if (ndi_prop_update_int(DDI_DEV_T_NONE,
16174 			    *lun_dip, "phy-num", ptgt->m_phynum) !=
16175 			    DDI_PROP_SUCCESS) {
16176 				mptsas_log(mpt, CE_WARN, "mptsas driver "
16177 				    "failed to create phy-num property for "
16178 				    "target %d", target);
16179 				ndi_rtn = NDI_FAILURE;
16180 				goto phys_create_done;
16181 			}
16182 		}
16183 phys_create_done:
16184 		/*
		 * If the props were set up OK, online the lun
16186 		 */
16187 		if (ndi_rtn == NDI_SUCCESS) {
16188 			/*
16189 			 * Try to online the new node
16190 			 */
16191 			ndi_rtn = ndi_devi_online(*lun_dip, NDI_ONLINE_ATTACH);
16192 		}
16193 
16194 		/*
		 * If the online failed, tear down the allocated lun node
16196 		 */
16197 		if (ndi_rtn != NDI_SUCCESS) {
16198 			NDBG12(("mptsas driver unable to online "
16199 			    "target %d lun %d", target, lun));
16200 			ndi_prop_remove_all(*lun_dip);
16201 			(void) ndi_devi_free(*lun_dip);
16202 			*lun_dip = NULL;
16203 		}
16204 	}
16205 
16206 	scsi_hba_nodename_compatible_free(nodename, compatible);
16207 
16208 	if (wwn_str != NULL) {
16209 		kmem_free(wwn_str, MPTSAS_WWN_STRLEN);
16210 	}
16211 	if (component != NULL) {
16212 		kmem_free(component, MAXPATHLEN);
16213 	}
16214 
16215 
16216 	return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
16217 }
16218 
16219 static int
mptsas_probe_smp(dev_info_t *pdip, uint64_t wwn)
16221 {
16222 	mptsas_t	*mpt = DIP2MPT(pdip);
16223 	struct smp_device smp_sd;
16224 
16225 	/* XXX An HBA driver should not be allocating an smp_device. */
16226 	bzero(&smp_sd, sizeof (struct smp_device));
16227 	smp_sd.smp_sd_address.smp_a_hba_tran = mpt->m_smptran;
16228 	bcopy(&wwn, smp_sd.smp_sd_address.smp_a_wwn, SAS_WWN_BYTE_SIZE);
16229 
16230 	if (smp_probe(&smp_sd) != DDI_PROBE_SUCCESS)
16231 		return (NDI_FAILURE);
16232 	return (NDI_SUCCESS);
16233 }
16234 
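/*
 * mptsas_config_smp() resolves an expander by SAS address within the
 * phymask of the given iport and, if found, enumerates it via
 * mptsas_online_smp().
 */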
16235 static int
mptsas_config_smp(dev_info_t *pdip, uint64_t sas_wwn, dev_info_t **smp_dip)
16237 {
16238 	mptsas_t	*mpt = DIP2MPT(pdip);
16239 	mptsas_smp_t	*psmp = NULL;
16240 	int		rval;
16241 	int		phymask;
16242 
16243 	/*
	 * Get the physical port associated with the iport
16245 	 * PHYMASK TODO
16246 	 */
16247 	phymask = ddi_prop_get_int(DDI_DEV_T_ANY, pdip, 0,
16248 	    "phymask", 0);
16249 	/*
16250 	 * Find the smp node in hash table with specified sas address and
16251 	 * physical port
16252 	 */
16253 	psmp = mptsas_wwid_to_psmp(mpt, phymask, sas_wwn);
16254 	if (psmp == NULL) {
16255 		return (DDI_FAILURE);
16256 	}
16257 
16258 	rval = mptsas_online_smp(pdip, psmp, smp_dip);
16259 
16260 	return (rval);
16261 }
16262 
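/*
 * mptsas_online_smp() creates (or finds) the "smp" child node for an
 * expander, fills in its WWN, target-port and attached-port properties from
 * expander page0 and device page0 data, and, for direct-attached expanders
 * on an iport that has not yet been fully configured, refreshes the iport's
 * SM-HBA phy properties before onlining the node.
 */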
16263 static int
mptsas_online_smp(dev_info_t *pdip, mptsas_smp_t *smp_node,
16265     dev_info_t **smp_dip)
16266 {
16267 	char		wwn_str[MPTSAS_WWN_STRLEN];
16268 	char		attached_wwn_str[MPTSAS_WWN_STRLEN];
16269 	int		ndi_rtn = NDI_FAILURE;
16270 	int		rval = 0;
16271 	mptsas_smp_t	dev_info;
16272 	uint32_t	page_address;
16273 	mptsas_t	*mpt = DIP2MPT(pdip);
16274 	uint16_t	dev_hdl;
16275 	uint64_t	sas_wwn;
16276 	uint64_t	smp_sas_wwn;
16277 	uint8_t		physport;
16278 	uint8_t		phy_id;
16279 	uint16_t	pdev_hdl;
16280 	uint8_t		numphys = 0;
16281 	uint16_t	i = 0;
16282 	char		phymask[MPTSAS_MAX_PHYS];
16283 	char		*iport = NULL;
16284 	mptsas_phymask_t	phy_mask = 0;
16285 	uint16_t	attached_devhdl;
16286 	uint16_t	bay_num, enclosure, io_flags;
16287 
16288 	(void) sprintf(wwn_str, "%"PRIx64, smp_node->m_addr.mta_wwn);
16289 
16290 	/*
	 * Probe the SMP device so that the node of a removed device cannot
	 * be configured successfully
16293 	 */
16294 	if (mptsas_probe_smp(pdip, smp_node->m_addr.mta_wwn) != NDI_SUCCESS) {
16295 		return (DDI_FAILURE);
16296 	}
16297 
16298 	if ((*smp_dip = mptsas_find_smp_child(pdip, wwn_str)) != NULL) {
16299 		return (DDI_SUCCESS);
16300 	}
16301 
16302 	ndi_rtn = ndi_devi_alloc(pdip, "smp", DEVI_SID_NODEID, smp_dip);
16303 
16304 	/*
	 * if the smp node was allocated successfully, set its props
16306 	 */
16307 	if (ndi_rtn == NDI_SUCCESS) {
16308 		/*
		 * Mark the child node as SMP flavored
16310 		 */
16311 		ndi_flavor_set(*smp_dip, SCSA_FLAVOR_SMP);
16312 
16313 		if (ndi_prop_update_string(DDI_DEV_T_NONE,
16314 		    *smp_dip, SMP_WWN, wwn_str) !=
16315 		    DDI_PROP_SUCCESS) {
16316 			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
16317 			    "property for smp device %s (sas_wwn)",
16318 			    wwn_str);
16319 			ndi_rtn = NDI_FAILURE;
16320 			goto smp_create_done;
16321 		}
16322 		(void) sprintf(wwn_str, "w%"PRIx64, smp_node->m_addr.mta_wwn);
16323 		if (ndi_prop_update_string(DDI_DEV_T_NONE,
16324 		    *smp_dip, SCSI_ADDR_PROP_TARGET_PORT, wwn_str) !=
16325 		    DDI_PROP_SUCCESS) {
16326 			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
16327 			    "property for iport target-port %s (sas_wwn)",
16328 			    wwn_str);
16329 			ndi_rtn = NDI_FAILURE;
16330 			goto smp_create_done;
16331 		}
16332 
16333 		mutex_enter(&mpt->m_mutex);
16334 
16335 		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_HNDL &
16336 		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | smp_node->m_devhdl;
16337 		rval = mptsas_get_sas_expander_page0(mpt, page_address,
16338 		    &dev_info);
16339 		if (rval != DDI_SUCCESS) {
16340 			mutex_exit(&mpt->m_mutex);
16341 			mptsas_log(mpt, CE_WARN,
16342 			    "mptsas unable to get expander "
16343 			    "parent device info for %x", page_address);
16344 			ndi_rtn = NDI_FAILURE;
16345 			goto smp_create_done;
16346 		}
16347 
16348 		smp_node->m_pdevhdl = dev_info.m_pdevhdl;
16349 		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
16350 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
16351 		    (uint32_t)dev_info.m_pdevhdl;
16352 		rval = mptsas_get_sas_device_page0(mpt, page_address,
16353 		    &dev_hdl, &sas_wwn, &smp_node->m_pdevinfo, &physport,
16354 		    &phy_id, &pdev_hdl, &bay_num, &enclosure, &io_flags);
16355 		if (rval != DDI_SUCCESS) {
16356 			mutex_exit(&mpt->m_mutex);
16357 			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
16358 			    "device info for %x", page_address);
16359 			ndi_rtn = NDI_FAILURE;
16360 			goto smp_create_done;
16361 		}
16362 
16363 		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_HANDLE &
16364 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) |
16365 		    (uint32_t)dev_info.m_devhdl;
16366 		rval = mptsas_get_sas_device_page0(mpt, page_address,
16367 		    &dev_hdl, &smp_sas_wwn, &smp_node->m_deviceinfo,
16368 		    &physport, &phy_id, &pdev_hdl, &bay_num, &enclosure,
16369 		    &io_flags);
16370 		if (rval != DDI_SUCCESS) {
16371 			mutex_exit(&mpt->m_mutex);
16372 			mptsas_log(mpt, CE_WARN, "mptsas unable to get "
16373 			    "device info for %x", page_address);
16374 			ndi_rtn = NDI_FAILURE;
16375 			goto smp_create_done;
16376 		}
16377 		mutex_exit(&mpt->m_mutex);
16378 
16379 		/*
		 * If this smp is directly attached to the controller, set
		 * the attached-port property to the HBA's base WWID.
16382 		 */
16383 		if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
16384 		    != DEVINFO_DIRECT_ATTACHED) {
16385 			(void) sprintf(attached_wwn_str, "w%016"PRIx64,
16386 			    sas_wwn);
16387 		} else {
16388 			(void) sprintf(attached_wwn_str, "w%016"PRIx64,
16389 			    mpt->un.m_base_wwid);
16390 		}
16391 
16392 		if (ndi_prop_update_string(DDI_DEV_T_NONE,
16393 		    *smp_dip, SCSI_ADDR_PROP_ATTACHED_PORT, attached_wwn_str) !=
16394 		    DDI_PROP_SUCCESS) {
16395 			mptsas_log(mpt, CE_WARN, "mptsas unable to create "
16396 			    "property for smp attached-port %s (sas_wwn)",
16397 			    attached_wwn_str);
16398 			ndi_rtn = NDI_FAILURE;
16399 			goto smp_create_done;
16400 		}
16401 
16402 		if (ndi_prop_create_boolean(DDI_DEV_T_NONE,
16403 		    *smp_dip, SMP_PROP) != DDI_PROP_SUCCESS) {
16404 			mptsas_log(mpt, CE_WARN, "mptsas unable to "
16405 			    "create property for SMP %s (SMP_PROP) ",
16406 			    wwn_str);
16407 			ndi_rtn = NDI_FAILURE;
16408 			goto smp_create_done;
16409 		}
16410 
16411 		/*
		 * Check whether the smp is directly attached to the
		 * controller.
16414 		 */
16415 		if ((smp_node->m_deviceinfo & DEVINFO_DIRECT_ATTACHED)
16416 		    != DEVINFO_DIRECT_ATTACHED) {
16417 			goto smp_create_done;
16418 		}
16419 		numphys = ddi_prop_get_int(DDI_DEV_T_ANY, pdip,
16420 		    DDI_PROP_DONTPASS, MPTSAS_NUM_PHYS, -1);
16421 		if (numphys > 0) {
16422 			goto smp_create_done;
16423 		}
16424 		/*
		 * This is an old iport; reconfigure its props.
16427 		 */
16428 		if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
16429 		    MPTSAS_VIRTUAL_PORT, 0) !=
16430 		    DDI_PROP_SUCCESS) {
16431 			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
16432 			    MPTSAS_VIRTUAL_PORT);
16433 			mptsas_log(mpt, CE_WARN, "mptsas virtual port "
16434 			    "prop update failed");
16435 			goto smp_create_done;
16436 		}
16437 
16438 		mutex_enter(&mpt->m_mutex);
16439 		numphys = 0;
16440 		iport = ddi_get_name_addr(pdip);
16441 		for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
16442 			bzero(phymask, sizeof (phymask));
16443 			(void) sprintf(phymask,
16444 			    "%x", mpt->m_phy_info[i].phy_mask);
16445 			if (strcmp(phymask, iport) == 0) {
16446 				phy_mask = mpt->m_phy_info[i].phy_mask;
16447 				break;
16448 			}
16449 		}
16450 
16451 		for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
16452 			if ((phy_mask >> i) & 0x01) {
16453 				numphys++;
16454 			}
16455 		}
16456 		/*
16457 		 * Update PHY info for smhba
16458 		 */
16459 		if (mptsas_smhba_phy_init(mpt)) {
16460 			mutex_exit(&mpt->m_mutex);
16461 			mptsas_log(mpt, CE_WARN, "mptsas phy update "
16462 			    "failed");
16463 			goto smp_create_done;
16464 		}
16465 		mutex_exit(&mpt->m_mutex);
16466 
16467 		mptsas_smhba_set_all_phy_props(mpt, pdip, numphys, phy_mask,
16468 		    &attached_devhdl);
16469 
16470 		if (ddi_prop_update_int(DDI_DEV_T_NONE, pdip,
16471 		    MPTSAS_NUM_PHYS, numphys) !=
16472 		    DDI_PROP_SUCCESS) {
16473 			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
16474 			    MPTSAS_NUM_PHYS);
16475 			mptsas_log(mpt, CE_WARN, "mptsas update "
16476 			    "num phys props failed");
16477 			goto smp_create_done;
16478 		}
16479 		/*
16480 		 * Add parent's props for SMHBA support
16481 		 */
16482 		if (ddi_prop_update_string(DDI_DEV_T_NONE, pdip,
16483 		    SCSI_ADDR_PROP_ATTACHED_PORT, wwn_str) !=
16484 		    DDI_PROP_SUCCESS) {
16485 			(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip,
16486 			    SCSI_ADDR_PROP_ATTACHED_PORT);
			mptsas_log(mpt, CE_WARN, "mptsas update iport "
16488 			    "attached-port failed");
16489 			goto smp_create_done;
16490 		}
16491 
16492 smp_create_done:
16493 		/*
		 * If the props were set up OK, online the node
16495 		 */
16496 		if (ndi_rtn == NDI_SUCCESS) {
16497 			/*
16498 			 * Try to online the new node
16499 			 */
16500 			ndi_rtn = ndi_devi_online(*smp_dip, NDI_ONLINE_ATTACH);
16501 		}
16502 
16503 		/*
		 * If the online failed, tear down the allocated node
16505 		 */
16506 		if (ndi_rtn != NDI_SUCCESS) {
16507 			NDBG12(("mptsas unable to online "
16508 			    "SMP target %s", wwn_str));
16509 			ndi_prop_remove_all(*smp_dip);
16510 			(void) ndi_devi_free(*smp_dip);
16511 		}
16512 	}
16513 
16514 	return ((ndi_rtn == NDI_SUCCESS) ? DDI_SUCCESS : DDI_FAILURE);
16515 }
16516 
16517 /* smp transport routine */
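/*
 * mptsas_smp_start() translates a SAS framework smp_pkt into an MPI2 SMP
 * passthrough request and runs it through mptsas_do_passthru().  The request
 * and response sizes are reduced by 4 bytes each (this appears to account
 * for the SMP CRC, which the IOC handles itself), and the direction flags
 * are derived from which of the request/response buffers are non-empty.
 * IOC and SAS status codes from the reply are mapped onto errno values in
 * smp_pkt_reason.
 */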
static int mptsas_smp_start(struct smp_pkt *smp_pkt)
16519 {
16520 	uint64_t			wwn;
16521 	Mpi2SmpPassthroughRequest_t	req;
16522 	Mpi2SmpPassthroughReply_t	rep;
16523 	uint32_t			direction = 0;
16524 	mptsas_t			*mpt;
16525 	int				ret;
16526 	uint64_t			tmp64;
16527 
16528 	mpt = (mptsas_t *)smp_pkt->smp_pkt_address->
16529 	    smp_a_hba_tran->smp_tran_hba_private;
16530 
16531 	bcopy(smp_pkt->smp_pkt_address->smp_a_wwn, &wwn, SAS_WWN_BYTE_SIZE);
16532 	/*
	 * Compose an SMP passthrough request message and hand it to
	 * mptsas_do_passthru().
16535 	 */
16536 	bzero(&req, sizeof (req));
16537 	bzero(&rep, sizeof (rep));
16538 	req.PassthroughFlags = 0;
16539 	req.PhysicalPort = 0xff;
16540 	req.ChainOffset = 0;
16541 	req.Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
16542 
16543 	if ((smp_pkt->smp_pkt_reqsize & 0xffff0000ul) != 0) {
16544 		smp_pkt->smp_pkt_reason = ERANGE;
16545 		return (DDI_FAILURE);
16546 	}
16547 	req.RequestDataLength = LE_16((uint16_t)(smp_pkt->smp_pkt_reqsize - 4));
16548 
16549 	req.MsgFlags = 0;
16550 	tmp64 = LE_64(wwn);
16551 	bcopy(&tmp64, &req.SASAddress, SAS_WWN_BYTE_SIZE);
16552 	if (smp_pkt->smp_pkt_rspsize > 0) {
16553 		direction |= MPTSAS_PASS_THRU_DIRECTION_READ;
16554 	}
16555 	if (smp_pkt->smp_pkt_reqsize > 0) {
16556 		direction |= MPTSAS_PASS_THRU_DIRECTION_WRITE;
16557 	}
16558 
16559 	mutex_enter(&mpt->m_mutex);
16560 	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep,
16561 	    (uint8_t *)smp_pkt->smp_pkt_rsp,
16562 	    offsetof(Mpi2SmpPassthroughRequest_t, SGL), sizeof (rep),
16563 	    smp_pkt->smp_pkt_rspsize - 4, direction,
16564 	    (uint8_t *)smp_pkt->smp_pkt_req, smp_pkt->smp_pkt_reqsize - 4,
16565 	    smp_pkt->smp_pkt_timeout, FKIOCTL);
16566 	mutex_exit(&mpt->m_mutex);
16567 	if (ret != 0) {
16568 		cmn_err(CE_WARN, "smp_start do passthru error %d", ret);
16569 		smp_pkt->smp_pkt_reason = (uchar_t)(ret);
16570 		return (DDI_FAILURE);
16571 	}
	/* The passthrough succeeded; check the SMP status. */
16573 	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
16574 		switch (LE_16(rep.IOCStatus)) {
16575 		case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
16576 			smp_pkt->smp_pkt_reason = ENODEV;
16577 			break;
16578 		case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
16579 			smp_pkt->smp_pkt_reason = EOVERFLOW;
16580 			break;
16581 		case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
16582 			smp_pkt->smp_pkt_reason = EIO;
16583 			break;
16584 		default:
			mptsas_log(mpt, CE_NOTE, "smp_start: get unknown ioc "
16586 			    "status:%x", LE_16(rep.IOCStatus));
16587 			smp_pkt->smp_pkt_reason = EIO;
16588 			break;
16589 		}
16590 		return (DDI_FAILURE);
16591 	}
16592 	if (rep.SASStatus != MPI2_SASSTATUS_SUCCESS) {
16593 		mptsas_log(mpt, CE_NOTE, "smp_start: get error SAS status:%x",
16594 		    rep.SASStatus);
16595 		smp_pkt->smp_pkt_reason = EIO;
16596 		return (DDI_FAILURE);
16597 	}
16598 
16599 	return (DDI_SUCCESS);
16600 }
16601 
16602 /*
 * If we don't already have a match, walk SAS device page0 for each device
 * until we find one. If that fails, return NULL.
16605  */
16606 static mptsas_target_t *
mptsas_phy_to_tgt(mptsas_t *mpt, mptsas_phymask_t phymask, uint8_t phy)
16608 {
16609 	int		i, j = 0;
16610 	int		rval = 0;
16611 	uint16_t	cur_handle;
16612 	uint32_t	page_address;
16613 	mptsas_target_t	*ptgt = NULL;
16614 
16615 	/*
	 * A PHY-addressed device must be direct attached on a narrow
	 * (single-phy) port. If the phymask covers more than one phy,
	 * this iport cannot be the parent of the device we are looking for.
16619 	 */
16620 	for (i = 0; i < MPTSAS_MAX_PHYS; i++) {
16621 		if ((1 << i) & phymask)
16622 			j++;
16623 	}
16624 
16625 	if (j > 1)
16626 		return (NULL);
16627 
16628 	/*
	 * This is a narrow port with a single device attached, so the
	 * device whose physical port number equals the iport's port
	 * number is the one we are looking for.
16632 	 */
16633 
16634 	if (mpt->m_phy_info[phy].phy_mask != phymask)
16635 		return (NULL);
16636 
16637 	mutex_enter(&mpt->m_mutex);
16638 
16639 	ptgt = refhash_linear_search(mpt->m_targets, mptsas_target_eval_nowwn,
16640 	    &phy);
16641 	if (ptgt != NULL) {
16642 		mutex_exit(&mpt->m_mutex);
16643 		return (ptgt);
16644 	}
16645 
16646 	if (mpt->m_done_traverse_dev) {
16647 		mutex_exit(&mpt->m_mutex);
16648 		return (NULL);
16649 	}
16650 
	/* No match yet; walk the remaining device handles. */
16652 	cur_handle = mpt->m_dev_handle;
16653 	for (; ; ) {
16654 		ptgt = NULL;
16655 		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
16656 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | (uint32_t)cur_handle;
16657 		rval = mptsas_get_target_device_info(mpt, page_address,
16658 		    &cur_handle, &ptgt);
16659 		if ((rval == DEV_INFO_FAIL_PAGE0) ||
16660 		    (rval == DEV_INFO_FAIL_ALLOC)) {
16661 			break;
16662 		}
16663 		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
16664 		    (rval == DEV_INFO_PHYS_DISK) ||
16665 		    (rval == DEV_INFO_FAIL_GUID)) {
16666 			continue;
16667 		}
16668 		mpt->m_dev_handle = cur_handle;
16669 
16670 		if ((ptgt->m_addr.mta_wwn == 0) && (ptgt->m_phynum == phy)) {
16671 			break;
16672 		}
16673 	}
16674 
16675 	mutex_exit(&mpt->m_mutex);
16676 	return (ptgt);
16677 }
16678 
16679 /*
 * ptgt->m_addr.mta_wwn contains the wwid of each disk; for RAID
 * volumes we need to check m_raidvol[x].m_raidwwid instead.
 * If we don't already have a match, walk SAS device page0 for
 * each device until we find one.
 * If that fails, return NULL.
16685  */
16686 static mptsas_target_t *
mptsas_wwid_to_ptgt(mptsas_t *mpt, mptsas_phymask_t phymask, uint64_t wwid)
16688 {
16689 	int		rval = 0;
16690 	uint16_t	cur_handle;
16691 	uint32_t	page_address;
16692 	mptsas_target_t	*tmp_tgt = NULL;
16693 	mptsas_target_addr_t addr;
16694 
16695 	addr.mta_wwn = wwid;
16696 	addr.mta_phymask = phymask;
16697 	mutex_enter(&mpt->m_mutex);
16698 	tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
16699 	if (tmp_tgt != NULL) {
16700 		mutex_exit(&mpt->m_mutex);
16701 		return (tmp_tgt);
16702 	}
16703 
16704 	if (phymask == 0) {
16705 		/*
		 * It's an IR (RAID) volume.
16707 		 */
16708 		rval = mptsas_get_raid_info(mpt);
16709 		if (rval) {
16710 			tmp_tgt = refhash_lookup(mpt->m_targets, &addr);
16711 		}
16712 		mutex_exit(&mpt->m_mutex);
16713 		return (tmp_tgt);
16714 	}
16715 
16716 	if (mpt->m_done_traverse_dev) {
16717 		mutex_exit(&mpt->m_mutex);
16718 		return (NULL);
16719 	}
16720 
	/* No match yet; walk the remaining device handles. */
16722 	cur_handle = mpt->m_dev_handle;
16723 	for (;;) {
16724 		tmp_tgt = NULL;
16725 		page_address = (MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE &
16726 		    MPI2_SAS_DEVICE_PGAD_FORM_MASK) | cur_handle;
16727 		rval = mptsas_get_target_device_info(mpt, page_address,
16728 		    &cur_handle, &tmp_tgt);
16729 		if ((rval == DEV_INFO_FAIL_PAGE0) ||
16730 		    (rval == DEV_INFO_FAIL_ALLOC)) {
16731 			tmp_tgt = NULL;
16732 			break;
16733 		}
16734 		if ((rval == DEV_INFO_WRONG_DEVICE_TYPE) ||
16735 		    (rval == DEV_INFO_PHYS_DISK) ||
16736 		    (rval == DEV_INFO_FAIL_GUID)) {
16737 			continue;
16738 		}
16739 		mpt->m_dev_handle = cur_handle;
16740 		if ((tmp_tgt->m_addr.mta_wwn) &&
16741 		    (tmp_tgt->m_addr.mta_wwn == wwid) &&
16742 		    (tmp_tgt->m_addr.mta_phymask == phymask)) {
16743 			break;
16744 		}
16745 	}
16746 
16747 	mutex_exit(&mpt->m_mutex);
16748 	return (tmp_tgt);
16749 }
16750 
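/*
 * mptsas_wwid_to_psmp() is the expander analogue of mptsas_wwid_to_ptgt():
 * look the (wwid, phymask) pair up in m_smp_targets first, and only walk
 * expander page0 if the SMP hash has not yet been fully populated.
 */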
16751 static mptsas_smp_t *
mptsas_wwid_to_psmp(mptsas_t *mpt, mptsas_phymask_t phymask, uint64_t wwid)
16753 {
16754 	int		rval = 0;
16755 	uint16_t	cur_handle;
16756 	uint32_t	page_address;
16757 	mptsas_smp_t	smp_node, *psmp = NULL;
16758 	mptsas_target_addr_t addr;
16759 
16760 	addr.mta_wwn = wwid;
16761 	addr.mta_phymask = phymask;
16762 	mutex_enter(&mpt->m_mutex);
16763 	psmp = refhash_lookup(mpt->m_smp_targets, &addr);
16764 	if (psmp != NULL) {
16765 		mutex_exit(&mpt->m_mutex);
16766 		return (psmp);
16767 	}
16768 
16769 	if (mpt->m_done_traverse_smp) {
16770 		mutex_exit(&mpt->m_mutex);
16771 		return (NULL);
16772 	}
16773 
	/* No match yet; walk the remaining expander handles. */
16775 	cur_handle = mpt->m_smp_devhdl;
16776 	for (;;) {
16777 		psmp = NULL;
16778 		page_address = (MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL &
16779 		    MPI2_SAS_EXPAND_PGAD_FORM_MASK) | (uint32_t)cur_handle;
16780 		rval = mptsas_get_sas_expander_page0(mpt, page_address,
16781 		    &smp_node);
16782 		if (rval != DDI_SUCCESS) {
16783 			break;
16784 		}
16785 		mpt->m_smp_devhdl = cur_handle = smp_node.m_devhdl;
16786 		psmp = mptsas_smp_alloc(mpt, &smp_node);
16787 		ASSERT(psmp);
16788 		if ((psmp->m_addr.mta_wwn) && (psmp->m_addr.mta_wwn == wwid) &&
16789 		    (psmp->m_addr.mta_phymask == phymask)) {
16790 			break;
16791 		}
16792 	}
16793 
16794 	mutex_exit(&mpt->m_mutex);
16795 	return (psmp);
16796 }
16797 
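/*
 * Allocate (or refresh) a target structure in the given refhash.  Targets
 * are keyed by the (SAS WWN, phymask) pair, so a target that already exists
 * simply has its device handle and device info updated in place.
 */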
16798 mptsas_target_t *
mptsas_tgt_alloc(refhash_t *refhash, uint16_t devhdl, uint64_t wwid,
16800     uint32_t devinfo, mptsas_phymask_t phymask, uint8_t phynum)
16801 {
16802 	mptsas_target_t *tmp_tgt = NULL;
16803 	mptsas_target_addr_t addr;
16804 
16805 	addr.mta_wwn = wwid;
16806 	addr.mta_phymask = phymask;
16807 	tmp_tgt = refhash_lookup(refhash, &addr);
16808 	if (tmp_tgt != NULL) {
		NDBG20(("Hash item already exists"));
16810 		tmp_tgt->m_deviceinfo = devinfo;
16811 		tmp_tgt->m_devhdl = devhdl;	/* XXX - duplicate? */
16812 		return (tmp_tgt);
16813 	}
16814 	tmp_tgt = kmem_zalloc(sizeof (struct mptsas_target), KM_SLEEP);
16815 	if (tmp_tgt == NULL) {
16816 		cmn_err(CE_WARN, "Fatal, allocated tgt failed");
16817 		return (NULL);
16818 	}
16819 	tmp_tgt->m_devhdl = devhdl;
16820 	tmp_tgt->m_addr.mta_wwn = wwid;
16821 	tmp_tgt->m_deviceinfo = devinfo;
16822 	tmp_tgt->m_addr.mta_phymask = phymask;
16823 	tmp_tgt->m_phynum = phynum;
	/* Initialize the rest of the target structure. */
16825 	tmp_tgt->m_qfull_retries = QFULL_RETRIES;
16826 	tmp_tgt->m_qfull_retry_interval =
16827 	    drv_usectohz(QFULL_RETRY_INTERVAL * 1000);
16828 	tmp_tgt->m_t_throttle = MAX_THROTTLE;
16829 	TAILQ_INIT(&tmp_tgt->m_active_cmdq);
16830 
16831 	refhash_insert(refhash, tmp_tgt);
16832 
16833 	return (tmp_tgt);
16834 }
16835 
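/*
 * Copy only the mutable identification fields of an SMP target; the hash
 * linkage and address of the destination are left untouched.
 */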
16836 static void
mptsas_smp_target_copy(mptsas_smp_t *src, mptsas_smp_t *dst)
16838 {
16839 	dst->m_devhdl = src->m_devhdl;
16840 	dst->m_deviceinfo = src->m_deviceinfo;
16841 	dst->m_pdevhdl = src->m_pdevhdl;
16842 	dst->m_pdevinfo = src->m_pdevinfo;
16843 }
16844 
16845 static mptsas_smp_t *
mptsas_smp_alloc(mptsas_t *mpt, mptsas_smp_t *data)
16847 {
16848 	mptsas_target_addr_t addr;
16849 	mptsas_smp_t *ret_data;
16850 
16851 	addr.mta_wwn = data->m_addr.mta_wwn;
16852 	addr.mta_phymask = data->m_addr.mta_phymask;
16853 	ret_data = refhash_lookup(mpt->m_smp_targets, &addr);
16854 	/*
16855 	 * If there's already a matching SMP target, update its fields
16856 	 * in place.  Since the address is not changing, it's safe to do
16857 	 * this.  We cannot just bcopy() here because the structure we've
16858 	 * been given has invalid hash links.
16859 	 */
16860 	if (ret_data != NULL) {
16861 		mptsas_smp_target_copy(data, ret_data);
16862 		return (ret_data);
16863 	}
16864 
16865 	ret_data = kmem_alloc(sizeof (mptsas_smp_t), KM_SLEEP);
16866 	bcopy(data, ret_data, sizeof (mptsas_smp_t));
16867 	refhash_insert(mpt->m_smp_targets, ret_data);
16868 	return (ret_data);
16869 }
16870 
16871 /*
16872  * Functions for SGPIO LED support
16873  */
16874 static dev_info_t *
mptsas_get_dip_from_dev(dev_t dev, mptsas_phymask_t *phymask)
16876 {
16877 	dev_info_t	*dip;
16878 	int		prop;
16879 	dip = e_ddi_hold_devi_by_dev(dev, 0);
16880 	if (dip == NULL)
16881 		return (dip);
16882 	prop = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
16883 	    "phymask", 0);
16884 	*phymask = (mptsas_phymask_t)prop;
16885 	ddi_release_devi(dip);
16886 	return (dip);
16887 }
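
/*
 * Parse a unit address of the form "w<sasaddr>,<lun>" or "p<phy>,<lun>" and
 * resolve it to a target, using the WWN lookup for the former and the PHY
 * lookup for the latter.
 */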
16888 static mptsas_target_t *
mptsas_addr_to_ptgt(mptsas_t *mpt, char *addr, mptsas_phymask_t phymask)
16890 {
16891 	uint8_t			phynum;
16892 	uint64_t		wwn;
16893 	int			lun;
16894 	mptsas_target_t		*ptgt = NULL;
16895 
16896 	if (mptsas_parse_address(addr, &wwn, &phynum, &lun) != DDI_SUCCESS) {
16897 		return (NULL);
16898 	}
16899 	if (addr[0] == 'w') {
16900 		ptgt = mptsas_wwid_to_ptgt(mpt, (int)phymask, wwn);
16901 	} else {
16902 		ptgt = mptsas_phy_to_tgt(mpt, (int)phymask, phynum);
16903 	}
16904 	return (ptgt);
16905 }
16906 
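/*
 * Push the driver's cached view of a slot's LED state out to the enclosure.
 * The MPTSAS_LEDCTL_LED_* bits tracked in me_slotleds map onto the MPI2
 * slot status flags as follows (per the code below):
 *
 *	IDENT -> MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST
 *	FAIL  -> MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT
 *	OK2RM -> MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE
 */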
16907 static int
mptsas_flush_led_status(mptsas_t *mpt, mptsas_enclosure_t *mep, uint16_t idx)
16909 {
16910 	uint32_t slotstatus = 0;
16911 
16912 	ASSERT3U(idx, <, mep->me_nslots);
16913 
16914 	/* Build an MPI2 Slot Status based on our view of the world */
16915 	if (mep->me_slotleds[idx] & (1 << (MPTSAS_LEDCTL_LED_IDENT - 1)))
16916 		slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST;
16917 	if (mep->me_slotleds[idx] & (1 << (MPTSAS_LEDCTL_LED_FAIL - 1)))
16918 		slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT;
16919 	if (mep->me_slotleds[idx] & (1 << (MPTSAS_LEDCTL_LED_OK2RM - 1)))
16920 		slotstatus |= MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE;
16921 
16922 	/* Write it to the controller */
16923 	NDBG14(("mptsas_ioctl: set LED status %x for slot %x",
16924 	    slotstatus, idx + mep->me_fslot));
16925 	return (mptsas_send_sep(mpt, mep, idx, &slotstatus,
16926 	    MPI2_SEP_REQ_ACTION_WRITE_STATUS));
16927 }
16928 
16929 /*
 * Send a SEP (SCSI Enclosure Processor) request using enclosure/slot
 * addressing.
16931  */
16932 static int
mptsas_send_sep(mptsas_t *mpt, mptsas_enclosure_t *mep, uint16_t idx,
16934     uint32_t *status, uint8_t act)
16935 {
16936 	Mpi2SepRequest_t	req;
16937 	Mpi2SepReply_t		rep;
16938 	int			ret;
16939 	uint16_t		enctype;
16940 	uint16_t		slot;
16941 
16942 	ASSERT(mutex_owned(&mpt->m_mutex));
16943 
16944 	/*
	 * Make sure this enclosure is one that the IOC manages directly
	 * (SES, SGPIO, or GPIO). If it is not, there is no point in
	 * sending the SEP request.
16948 	 */
16949 	enctype = mep->me_flags & MPI2_SAS_ENCLS0_FLAGS_MNG_MASK;
16950 	if (enctype != MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES &&
16951 	    enctype != MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SGPIO &&
16952 	    enctype != MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_GPIO) {
16953 		return (ENOTTY);
16954 	}
16955 	slot = idx + mep->me_fslot;
16956 
16957 	bzero(&req, sizeof (req));
16958 	bzero(&rep, sizeof (rep));
16959 
16960 	req.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
16961 	req.Action = act;
16962 	req.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
16963 	req.EnclosureHandle = LE_16(mep->me_enchdl);
16964 	req.Slot = LE_16(slot);
16965 	if (act == MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
16966 		req.SlotStatus = LE_32(*status);
16967 	}
16968 	ret = mptsas_do_passthru(mpt, (uint8_t *)&req, (uint8_t *)&rep, NULL,
16969 	    sizeof (req), sizeof (rep), 0, MPTSAS_PASS_THRU_DIRECTION_NONE,
16970 	    NULL, 0, 60, FKIOCTL);
16971 	if (ret != 0) {
16972 		mptsas_log(mpt, CE_NOTE, "mptsas_send_sep: passthru SEP "
16973 		    "Processor Request message error %d", ret);
16974 		return (ret);
16975 	}
	/* The passthrough succeeded; check the IOC status. */
16977 	if (LE_16(rep.IOCStatus) != MPI2_IOCSTATUS_SUCCESS) {
16978 		mptsas_log(mpt, CE_NOTE, "send_sep act %x: ioc "
16979 		    "status:%x loginfo %x", act, LE_16(rep.IOCStatus),
16980 		    LE_32(rep.IOCLogInfo));
16981 		switch (LE_16(rep.IOCStatus) & MPI2_IOCSTATUS_MASK) {
16982 		case MPI2_IOCSTATUS_INVALID_FUNCTION:
16983 		case MPI2_IOCSTATUS_INVALID_VPID:
16984 		case MPI2_IOCSTATUS_INVALID_FIELD:
16985 		case MPI2_IOCSTATUS_INVALID_STATE:
16986 		case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
16987 		case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
16988 		case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
16989 		case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
16990 		case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
16991 		case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
16992 			return (EINVAL);
16993 		case MPI2_IOCSTATUS_BUSY:
16994 			return (EBUSY);
16995 		case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
16996 			return (EAGAIN);
16997 		case MPI2_IOCSTATUS_INVALID_SGL:
16998 		case MPI2_IOCSTATUS_INTERNAL_ERROR:
16999 		case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
17000 		default:
17001 			return (EIO);
17002 		}
17003 	}
17004 	if (act != MPI2_SEP_REQ_ACTION_WRITE_STATUS) {
17005 		*status = LE_32(rep.SlotStatus);
17006 	}
17007 
17008 	return (0);
17009 }
17010 
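/*
 * mptsas_dma_addr_create()/mptsas_dma_addr_destroy() bundle the usual
 * three-step DDI DMA setup (allocate a handle, allocate memory, bind the
 * handle) and its teardown.  A minimal usage sketch:
 *
 *	ddi_dma_handle_t dma_hd;
 *	ddi_acc_handle_t acc_hd;
 *	caddr_t memp;
 *	ddi_dma_cookie_t cookie;
 *
 *	if (mptsas_dma_addr_create(mpt, attrs, &dma_hd, &acc_hd,
 *	    &memp, size, &cookie) == FALSE)
 *		return (DDI_FAILURE);
 *	...
 *	mptsas_dma_addr_destroy(&dma_hd, &acc_hd);
 *
 * where "attrs" is a caller-supplied ddi_dma_attr_t and "size" the desired
 * allocation size; both names are illustrative only.
 */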
17011 int
mptsas_dma_addr_create(mptsas_t *mpt, ddi_dma_attr_t dma_attr,
17013     ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp, caddr_t *dma_memp,
17014     uint32_t alloc_size, ddi_dma_cookie_t *cookiep)
17015 {
17016 	ddi_dma_cookie_t	new_cookie;
17017 	size_t			alloc_len;
17018 	uint_t			ncookie;
17019 
17020 	if (cookiep == NULL)
17021 		cookiep = &new_cookie;
17022 
17023 	if (ddi_dma_alloc_handle(mpt->m_dip, &dma_attr, DDI_DMA_SLEEP,
17024 	    NULL, dma_hdp) != DDI_SUCCESS) {
17025 		return (FALSE);
17026 	}
17027 
17028 	if (ddi_dma_mem_alloc(*dma_hdp, alloc_size, &mpt->m_dev_acc_attr,
17029 	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, dma_memp, &alloc_len,
17030 	    acc_hdp) != DDI_SUCCESS) {
17031 		ddi_dma_free_handle(dma_hdp);
17032 		*dma_hdp = NULL;
17033 		return (FALSE);
17034 	}
17035 
17036 	if (ddi_dma_addr_bind_handle(*dma_hdp, NULL, *dma_memp, alloc_len,
17037 	    (DDI_DMA_RDWR | DDI_DMA_CONSISTENT), DDI_DMA_SLEEP, NULL,
17038 	    cookiep, &ncookie) != DDI_DMA_MAPPED) {
17039 		(void) ddi_dma_mem_free(acc_hdp);
17040 		ddi_dma_free_handle(dma_hdp);
17041 		*dma_hdp = NULL;
17042 		return (FALSE);
17043 	}
17044 
17045 	return (TRUE);
17046 }
17047 
17048 void
mptsas_dma_addr_destroy(ddi_dma_handle_t *dma_hdp, ddi_acc_handle_t *acc_hdp)
17050 {
17051 	if (*dma_hdp == NULL)
17052 		return;
17053 
17054 	(void) ddi_dma_unbind_handle(*dma_hdp);
17055 	(void) ddi_dma_mem_free(acc_hdp);
17056 	ddi_dma_free_handle(dma_hdp);
17057 	*dma_hdp = NULL;
17058 }
17059 
17060 /*
17061  * DDI UFM Callbacks
17062  */
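/*
 * The controller exposes a single firmware image with a single slot; the
 * reported version is taken from the "firmware-version" property that the
 * driver sets on its own devinfo node.
 */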
17063 static int
mptsas_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
17065     ddi_ufm_image_t *img)
17066 {
17067 	if (imgno != 0)
17068 		return (EINVAL);
17069 
17070 	ddi_ufm_image_set_desc(img, "IOC Firmware");
17071 	ddi_ufm_image_set_nslots(img, 1);
17072 
17073 	return (0);
17074 }
17075 
17076 static int
mptsas_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
17078     uint_t slotno, ddi_ufm_slot_t *slot)
17079 {
17080 	mptsas_t *mpt = (mptsas_t *)arg;
17081 	char *buf;
17082 
17083 	if (imgno != 0 || slotno != 0 ||
17084 	    ddi_prop_lookup_string(DDI_DEV_T_ANY, mpt->m_dip,
17085 	    DDI_PROP_DONTPASS, "firmware-version", &buf) != DDI_PROP_SUCCESS)
17086 		return (EINVAL);
17087 
17088 	ddi_ufm_slot_set_attrs(slot, DDI_UFM_ATTR_ACTIVE);
17089 	ddi_ufm_slot_set_version(slot, buf);
17090 
17091 	ddi_prop_free(buf);
17092 
17093 	return (0);
17094 }
17095 
17096 static int
mptsas_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps)
17098 {
17099 	*caps = DDI_UFM_CAP_REPORT;
17100 
17101 	return (0);
17102 }
17103