/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2024 Racktop Systems, Inc.
 */

/*
 * This file implements the basic HBA interface to SCSAv3.
 *
 * For target initialization, we'll look up the driver target state by the
 * device address and set it as HBA private in the struct scsi_device.
 *
 * The tran_reset(9e) and tran_abort(9e) entry points are implemented by a
 * common function that sends the appropriate task management request to the
 * target, iff the target supports task management requests. There is no
 * support for bus resets. The case of RESET_ALL is special: sd(4d) issues a
 * RESET_ALL in sddump() and errors out if that fails, so even if task
 * management is unsupported by a target or the reset fails for any other
 * reason, we return success. Any I/O errors due to an unsuccessful reset will
 * be caught later.
 *
 * The tran_start(9e) code paths are almost identical for physical and logical
 * devices, the major difference being that PDs will have the DevHandle in the
 * MPT I/O frame set to the invalid DevHandle (0xffff), while LDs will use the
 * target ID. Also, special settings are applied for LDs and PDs in the RAID
 * context (VendorRegion of the MPT I/O frame). Fastpath I/O is used only for
 * read/write commands sent to system PDs when the firmware supports JBOD
 * sequence numbers; there is no fastpath support for LDs.
 *
 * In tran_setup_pkt(9e), an MPT command is allocated for the scsi_pkt, and its
 * members are initialized as follows:
 * - pkt_cdbp will point to the CDB structure embedded in the MPT I/O frame
 * - pkt_scbp will point to the struct scsi_arq_status in the sense DMA memory
 *   allocated for the MPT command
 * - pkt_scblen will be set to the size of the sense DMA memory, minus alignment
 * - SenseBufferLowAddress and SenseBufferLength in the MPT I/O frame will be
 *   set to the sense DMA address and length, respectively, adjusted to account
 *   for the space needed for the ARQ pkt and alignment.
 * - There is no SenseBufferHighAddress.
 * - rc_timeout is set to pkt_time, but it is unknown if that has any effect.
 */

#include <sys/types.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/scsi/scsi.h>
#include <sys/scsi/adapters/mfi/mfi_pd.h>

#include "lmrc.h"
#include "lmrc_reg.h"

static int lmrc_getcap(struct scsi_address *, char *, int);
static int lmrc_setcap(struct scsi_address *, char *, int, int);

static int lmrc_tran_tgt_init(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);
static void lmrc_tran_tgt_free(dev_info_t *, dev_info_t *,
    scsi_hba_tran_t *, struct scsi_device *);

static int lmrc_tran_abort(struct scsi_address *, struct scsi_pkt *);
static int lmrc_tran_reset(struct scsi_address *, int);

static int lmrc_tran_setup_pkt(struct scsi_pkt *, int (*)(caddr_t), caddr_t);
static void lmrc_tran_teardown_pkt(struct scsi_pkt *);

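/*
 * Tunable: if B_TRUE, relaxed ordering is enabled in the DMA attributes
 * used for data transfers (see lmrc_hba_attach() below).
 */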
boolean_t lmrc_relaxed_ordering = B_TRUE;

static int
lmrc_getcap(struct scsi_address *sa, char *cap, int whom)
{
	struct scsi_device *sd = scsi_address_device(sa);
	lmrc_tgt_t *tgt = scsi_device_hba_private_get(sd);
	lmrc_t *lmrc = tgt->tgt_lmrc;
	int index;

	VERIFY(lmrc != NULL);

	if ((index = scsi_hba_lookup_capstr(cap)) == DDI_FAILURE)
		return (-1);

	switch (index) {
	case SCSI_CAP_CDB_LEN:
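		/*
		 * Use the sizeof-on-a-null-pointer idiom to derive the size
		 * of the CDB field embedded in the MPT I/O frame without
		 * needing an instance of the struct.
		 */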
		return (sizeof (((Mpi25SCSIIORequest_t *)NULL)->CDB.CDB32));

	case SCSI_CAP_DMA_MAX:
		if (lmrc->l_dma_attr.dma_attr_maxxfer > INT_MAX)
			return (INT_MAX);
		return (lmrc->l_dma_attr.dma_attr_maxxfer);

	case SCSI_CAP_SECTOR_SIZE:
		if (lmrc->l_dma_attr.dma_attr_granular > INT_MAX)
			return (INT_MAX);
		return (lmrc->l_dma_attr.dma_attr_granular);

	case SCSI_CAP_INTERCONNECT_TYPE: {
		uint8_t interconnect_type;

		rw_enter(&tgt->tgt_lock, RW_READER);
		interconnect_type = tgt->tgt_interconnect_type;
		rw_exit(&tgt->tgt_lock);
		return (interconnect_type);
	}
	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_WIDE_XFER:
	case SCSI_CAP_TAGGED_QING:
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_ARQ:
		return (1);

	case SCSI_CAP_RESET_NOTIFICATION:
	case SCSI_CAP_DISCONNECT:
	case SCSI_CAP_SYNCHRONOUS:
	case SCSI_CAP_LINKED_CMDS:
	case SCSI_CAP_INITIATOR_ID:
		return (0);

	default:
		return (-1);
	}
}

static int
lmrc_setcap(struct scsi_address *sa, char *cap, int value, int whom)
{
	struct scsi_device *sd = scsi_address_device(sa);
	lmrc_tgt_t *tgt = scsi_device_hba_private_get(sd);
	lmrc_t *lmrc = tgt->tgt_lmrc;
	int index;

	VERIFY(lmrc != NULL);

	if ((index = scsi_hba_lookup_capstr(cap)) == DDI_FAILURE)
		return (-1);

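	/* Setting a capability for all targets (whom == 0) isn't supported. */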
	if (whom == 0)
		return (-1);

	switch (index) {
	case SCSI_CAP_DMA_MAX:
		if (value <= lmrc->l_dma_attr.dma_attr_maxxfer)
			return (1);
		else
			return (0);

	case SCSI_CAP_MSG_OUT:
	case SCSI_CAP_WIDE_XFER:
	case SCSI_CAP_TAGGED_QING:
	case SCSI_CAP_UNTAGGED_QING:
	case SCSI_CAP_PARITY:
	case SCSI_CAP_ARQ:
		if (value == 1)
			return (1);
		else
			return (0);

	case SCSI_CAP_RESET_NOTIFICATION:
	case SCSI_CAP_DISCONNECT:
	case SCSI_CAP_SYNCHRONOUS:
	case SCSI_CAP_LINKED_CMDS:
	case SCSI_CAP_INITIATOR_ID:
		if (value == 0)
			return (1);
		else
			return (0);

	case SCSI_CAP_SECTOR_SIZE:
	case SCSI_CAP_TOTAL_SECTORS:
		return (0);

	default:
		return (-1);
	}
}

/*
 * lmrc_tran_tgt_init
 *
 * Find the driver target state and link it with the scsi_device.
 */
static int
lmrc_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	lmrc_t *lmrc = hba_tran->tran_hba_private;
	lmrc_tgt_t *tgt;

	VERIFY(lmrc != NULL);

	tgt = lmrc_tgt_find(lmrc, sd);
	if (tgt == NULL)
		return (DDI_FAILURE);

	/* lmrc_tgt_find() returns the target read-locked. */
	scsi_device_hba_private_set(sd, tgt);
	rw_exit(&tgt->tgt_lock);

	return (DDI_SUCCESS);
}

static void
lmrc_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
{
	scsi_device_hba_private_set(sd, NULL);
}

/*
 * lmrc_tran_start
 *
 * Start I/O of a scsi_pkt. Set up the MPT frame, the RAID context and, if
 * necessary, the SGL for the transfer. Wait for a reply if this is polled I/O.
 *
 * There are subtle differences in the way I/O is done for LDs and PDs.
 *
 * Fastpath I/O is used only for read/write commands sent to system PDs when
 * the firmware supports JBOD sequence numbers.
 */
static int
lmrc_tran_start(struct scsi_address *sa, struct scsi_pkt *pkt)
{
	Mpi25SCSIIORequest_t *io_req;
	lmrc_atomic_req_desc_t req_desc;
	lmrc_raidctx_g35_t *rc;
	struct scsi_device *sd;
	lmrc_scsa_cmd_t *cmd;
	lmrc_mpt_cmd_t *mpt;
	lmrc_tgt_t *tgt;
	lmrc_t *lmrc;
	uint8_t req_flags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	boolean_t intr = (pkt->pkt_flags & FLAG_NOINTR) == 0;
	int ret = TRAN_BADPKT;

	/*
	 * FLAG_NOINTR was set but we're not panicked. This may theoretically
	 * happen if scsi_transport() is called from an interrupt thread, and
	 * we don't support this.
	 */
	if (!intr && !ddi_in_panic())
		return (ret);

	sd = scsi_address_device(sa);
	VERIFY(sd != NULL);

	tgt = scsi_device_hba_private_get(sd);
	VERIFY(tgt != NULL);

	cmd = pkt->pkt_ha_private;
	VERIFY(cmd != NULL);

	VERIFY(cmd->sc_tgt == tgt);

	lmrc = tgt->tgt_lmrc;
	VERIFY(lmrc != NULL);

	if (lmrc->l_fw_fault)
		return (TRAN_FATAL_ERROR);

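	/*
	 * Reserve a command slot, returning TRAN_BUSY if this would exceed
	 * the maximum number of outstanding SCSI commands.
	 */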
	if (atomic_inc_uint_nv(&lmrc->l_fw_outstanding_cmds) >
	    lmrc->l_max_scsi_cmds) {
		atomic_dec_uint(&lmrc->l_fw_outstanding_cmds);
		return (TRAN_BUSY);
	}

	rw_enter(&tgt->tgt_lock, RW_READER);

	mpt = cmd->sc_mpt;
	VERIFY(mpt != NULL);
	mutex_enter(&mpt->mpt_lock);

	io_req = mpt->mpt_io_frame;

	io_req->Function = LMRC_MPI2_FUNCTION_LD_IO_REQUEST;

	rc = &io_req->VendorRegion;
	rc->rc_ld_tgtid = tgt->tgt_dev_id;

	if (tgt->tgt_pd_info == NULL) {
		/* This is LD I/O */
		io_req->DevHandle = tgt->tgt_dev_id;

		if (lmrc_cmd_is_rw(pkt->pkt_cdbp[0])) {
			rc->rc_type = MPI2_TYPE_CUDA;
			rc->rc_nseg = 1;
			rc->rc_routing_flags.rf_sqn = 1;
		}
	} else {
		/* This is PD I/O */
		io_req->DevHandle = LMRC_DEVHDL_INVALID;
		rc->rc_raid_flags.rf_io_subtype = LMRC_RF_IO_SUBTYPE_SYSTEM_PD;

		if (tgt->tgt_type == DTYPE_DIRECT &&
		    lmrc->l_use_seqnum_jbod_fp) {
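			/*
			 * Look up the device handle and config sequence
			 * number in the PD map.
			 */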
			mfi_pd_cfg_t *pdcfg;

			rw_enter(&lmrc->l_pdmap_lock, RW_READER);
			pdcfg = &lmrc->l_pdmap->pm_pdcfg[tgt->tgt_dev_id];

			if (lmrc->l_pdmap_tgtid_support)
				rc->rc_ld_tgtid = pdcfg->pd_tgtid;

			rc->rc_cfg_seqnum = pdcfg->pd_seqnum;
			io_req->DevHandle = pdcfg->pd_devhdl;
			rw_exit(&lmrc->l_pdmap_lock);

			if (lmrc_cmd_is_rw(pkt->pkt_cdbp[0])) {
				/*
				 * MPI2_TYPE_CUDA is valid only if FW supports
				 * JBOD Sequence number
				 */
				rc->rc_type = MPI2_TYPE_CUDA;
				rc->rc_nseg = 1;
				rc->rc_routing_flags.rf_sqn = 1;

				io_req->Function =
				    MPI2_FUNCTION_SCSI_IO_REQUEST;
				io_req->IoFlags |=
				    MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH;
				req_flags =
				    MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
			}
		}
	}

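	/* For data transfers, set the DMA direction and build the SGL. */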
	if (pkt->pkt_numcookies > 0) {
		if ((pkt->pkt_dma_flags & DDI_DMA_READ) != 0)
			io_req->Control |= MPI2_SCSIIO_CONTROL_READ;

		if ((pkt->pkt_dma_flags & DDI_DMA_WRITE) != 0)
			io_req->Control |= MPI2_SCSIIO_CONTROL_WRITE;

		lmrc_dma_build_sgl(lmrc, mpt, pkt->pkt_cookies,
		    pkt->pkt_numcookies);

		io_req->DataLength = pkt->pkt_dma_len;

		rc->rc_num_sge = pkt->pkt_numcookies;
	}

	VERIFY3S(ddi_dma_sync(lmrc->l_ioreq_dma.ld_hdl,
	    (void *)io_req - lmrc->l_ioreq_dma.ld_buf,
	    LMRC_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, DDI_DMA_SYNC_FORDEV),
	    ==, DDI_SUCCESS);

	req_desc = lmrc_build_atomic_request(lmrc, mpt, req_flags);

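	/* pkt_time is in seconds; compute an absolute hrtime deadline. */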
	mpt->mpt_timeout = gethrtime() + pkt->pkt_time * NANOSEC;
	lmrc_send_atomic_request(lmrc, req_desc);

	if (intr) {
		/* normal interrupt driven I/O processing */
		lmrc_tgt_add_active_mpt(tgt, mpt);
		ret = TRAN_ACCEPT;
	} else {
		/* FLAG_NOINTR was set and we're panicked */
		VERIFY(ddi_in_panic());

		ret = lmrc_poll_for_reply(lmrc, mpt);
		atomic_dec_uint(&lmrc->l_fw_outstanding_cmds);
	}

	mutex_exit(&mpt->mpt_lock);
	rw_exit(&tgt->tgt_lock);

	return (ret);
}

/*
 * lmrc_task_mgmt
 *
 * Send a TASK MGMT command to a target, provided it is TM capable.
 */
static int
lmrc_task_mgmt(lmrc_t *lmrc, lmrc_tgt_t *tgt, uint8_t type, uint16_t smid)
{
	Mpi2SCSITaskManagementRequest_t *tm_req;
	Mpi2SCSITaskManagementReply_t *tm_reply;
	uint64_t *pd_ld_flags;
	lmrc_atomic_req_desc_t req_desc;
	lmrc_mpt_cmd_t *mpt;
	clock_t ret;
	boolean_t tm_capable;

	rw_enter(&tgt->tgt_lock, RW_READER);

	/* Make sure the target can handle task mgmt commands. */
	if (tgt->tgt_pd_info == NULL) {
		tm_capable = lmrc_ld_tm_capable(lmrc, tgt->tgt_dev_id);
	} else {
		tm_capable = lmrc_pd_tm_capable(lmrc, tgt->tgt_dev_id);
	}

	if (!tm_capable) {
		rw_exit(&tgt->tgt_lock);
		return (0);
	}

	if (atomic_inc_uint_nv(&lmrc->l_fw_outstanding_cmds) >
	    lmrc->l_max_scsi_cmds) {
		rw_exit(&tgt->tgt_lock);
		atomic_dec_uint(&lmrc->l_fw_outstanding_cmds);
		return (0);
	}

	mpt = lmrc_get_mpt(lmrc);
	if (mpt == NULL) {
		rw_exit(&tgt->tgt_lock);
		atomic_dec_uint(&lmrc->l_fw_outstanding_cmds);
		return (0);
	}
	ASSERT(mutex_owned(&mpt->mpt_lock));

	bzero(mpt->mpt_io_frame, LMRC_MPI2_RAID_DEFAULT_IO_FRAME_SIZE);
	tm_req = mpt->mpt_io_frame;
	tm_reply = mpt->mpt_io_frame + 128;
	pd_ld_flags = (uint64_t *)tm_reply;

	tm_req->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
	tm_req->TaskType = type;
	tm_req->TaskMID = smid;
	tm_req->DevHandle = tgt->tgt_dev_id;

	/*
	 * The uint64_t immediately following the MPI2 task management request
	 * contains two flags indicating whether the target is an LD or a PD.
	 */
	if (tgt->tgt_pd_info == NULL)
		*pd_ld_flags = 1 << 0;
	else
		*pd_ld_flags = 1 << 1;

	VERIFY3S(ddi_dma_sync(lmrc->l_ioreq_dma.ld_hdl,
	    (void *)tm_req - lmrc->l_ioreq_dma.ld_buf,
	    LMRC_MPI2_RAID_DEFAULT_IO_FRAME_SIZE, DDI_DMA_SYNC_FORDEV),
	    ==, DDI_SUCCESS);

	req_desc = lmrc_build_atomic_request(lmrc, mpt,
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY);

	lmrc_send_atomic_request(lmrc, req_desc);

	/* Poll for completion if we're called while the system is panicked. */
	if (ddi_in_panic()) {
		ret = lmrc_poll_for_reply(lmrc, mpt);
	} else {
		clock_t timeout = drv_usectohz(LMRC_RESET_WAIT_TIME * MICROSEC);

		timeout += ddi_get_lbolt();
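		/* Wait until the command completes or the timeout expires. */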
		do {
			ret = cv_timedwait(&mpt->mpt_cv, &mpt->mpt_lock,
			    timeout);
		} while (mpt->mpt_complete == B_FALSE && ret != -1);
	}

	atomic_dec_uint(&lmrc->l_fw_outstanding_cmds);
	lmrc_put_mpt(mpt);
	rw_exit(&tgt->tgt_lock);

	if (ret >= 0)
		return (1);
	else
		return (-1);
}

/*
 * lmrc_abort_mpt
 *
 * Abort an MPT command by sending a TASK MGMT ABORT TASK command.
 */
int
lmrc_abort_mpt(lmrc_t *lmrc, lmrc_tgt_t *tgt, lmrc_mpt_cmd_t *mpt)
{
	ASSERT(mutex_owned(&tgt->tgt_mpt_active_lock));
	ASSERT(mutex_owned(&mpt->mpt_lock));

	return (lmrc_task_mgmt(lmrc, tgt, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
	    mpt->mpt_smid));
}

/*
 * lmrc_tran_abort
 *
 * Send a SCSI TASK MGMT request to abort a packet.
 */
static int
lmrc_tran_abort(struct scsi_address *sa, struct scsi_pkt *pkt)
{
	struct scsi_device *sd = scsi_address_device(sa);
	lmrc_tgt_t *tgt = scsi_device_hba_private_get(sd);
	lmrc_t *lmrc = tgt->tgt_lmrc;
	lmrc_scsa_cmd_t *cmd;
	lmrc_mpt_cmd_t *mpt;
	int ret = 0;

	VERIFY(lmrc != NULL);

	if (lmrc->l_fw_fault)
		return (0);

	/*
	 * If no pkt was given, abort all outstanding pkts for this target.
	 */
	if (pkt == NULL) {
		mutex_enter(&tgt->tgt_mpt_active_lock);
		for (mpt = lmrc_tgt_first_active_mpt(tgt);
		    mpt != NULL;
		    mpt = lmrc_tgt_next_active_mpt(tgt, mpt)) {
			ASSERT(mutex_owned(&mpt->mpt_lock));
			if (mpt->mpt_complete)
				continue;
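			/*
			 * Commands without a scsi_pkt can't be aborted
			 * through this interface.
			 */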
			if (mpt->mpt_pkt == NULL)
				continue;

			if (lmrc_abort_mpt(lmrc, tgt, mpt) > 0)
				ret = 1;
		}
		mutex_exit(&tgt->tgt_mpt_active_lock);

		return (ret);
	}

	cmd = pkt->pkt_ha_private;

	VERIFY(cmd != NULL);
	VERIFY(cmd->sc_tgt == tgt);

	mpt = cmd->sc_mpt;
	VERIFY(mpt != NULL);

	mutex_enter(&mpt->mpt_lock);
	ret = lmrc_abort_mpt(lmrc, tgt, mpt);
	mutex_exit(&mpt->mpt_lock);

	if (ret == -1) {
		dev_err(lmrc->l_dip, CE_WARN, "!task abort timed out, "
		    "tgt %d", tgt->tgt_dev_id);
		return (0);
	}

	return (ret);
}

/*
 * lmrc_tran_reset
 *
 * Reset a target. There is no genuine support for RESET_LUN or RESET_ALL;
 * both are mapped to a target reset, with RESET_ALL treated specially below.
 */
static int
lmrc_tran_reset(struct scsi_address *sa, int level)
{
	struct scsi_device *sd = scsi_address_device(sa);
	lmrc_tgt_t *tgt = scsi_device_hba_private_get(sd);
	lmrc_t *lmrc = tgt->tgt_lmrc;
	int ret = 0;

	VERIFY(lmrc != NULL);

	if (lmrc->l_fw_fault)
		return (0);

	switch (level) {
	case RESET_ALL:
	case RESET_LUN:
	case RESET_TARGET:
		rw_enter(&tgt->tgt_lock, RW_READER);
		ret = lmrc_task_mgmt(lmrc, tgt,
		    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0);
		rw_exit(&tgt->tgt_lock);

		if (ret == -1) {
			dev_err(lmrc->l_dip, CE_WARN,
			    "!target reset timed out, tgt %d",
			    tgt->tgt_dev_id);
			return (0);
		}

		break;
	}

	/*
	 * Fake a successful return in the case of RESET_ALL for the benefit of
	 * being able to save kernel core dumps: sddump() wants to reset the
	 * device and errors out if that fails, even if the failure is merely
	 * due to resets being unsupported.
	 */
	if (ret == 0 && level == RESET_ALL)
		ret = 1;

	return (ret);
}

/*
 * lmrc_tran_setup_pkt
 *
 * Set up an MPT command for a scsi_pkt, and initialize scsi_pkt members as
 * needed:
 * - pkt_cdbp will point to the CDB structure embedded in the MPT I/O frame
 * - pkt_scbp will point to the struct scsi_arq_status in the sense DMA memory
 *   allocated for the MPT command
 * - pkt_scblen will be set to the size of the sense DMA memory, minus alignment
 * - SenseBufferLowAddress and SenseBufferLength in the MPT I/O frame will be
 *   set to the sense DMA address and length, respectively, adjusted to account
 *   for the space needed for the ARQ pkt and alignment.
 * - There is no SenseBufferHighAddress.
 * - rc_timeout is set to pkt_time, but it is unknown if that has any effect.
 *
 * The procedure is the same irrespective of whether the command is sent to a
 * physical device or RAID volume.
 */
static int
lmrc_tran_setup_pkt(struct scsi_pkt *pkt, int (*callback)(caddr_t),
    caddr_t arg)
{
	struct scsi_address *sa;
	struct scsi_device *sd;
	lmrc_tgt_t *tgt;
	lmrc_t *lmrc;
	lmrc_scsa_cmd_t *cmd;
	lmrc_mpt_cmd_t *mpt;
	Mpi25SCSIIORequest_t *io_req;
	lmrc_raidctx_g35_t *rc;

	if (pkt->pkt_cdblen > sizeof (io_req->CDB.CDB32))
		return (-1);

	sa = &pkt->pkt_address;
	VERIFY(sa != NULL);

	sd = scsi_address_device(sa);
	VERIFY(sd != NULL);

	tgt = scsi_device_hba_private_get(sd);
	VERIFY(tgt != NULL);

	rw_enter(&tgt->tgt_lock, RW_READER);

	lmrc = tgt->tgt_lmrc;
	VERIFY(lmrc != NULL);

	cmd = pkt->pkt_ha_private;
	ASSERT(cmd != NULL);

	mpt = lmrc_get_mpt(lmrc);
	if (mpt == NULL) {
		rw_exit(&tgt->tgt_lock);
		return (-1);
	}
	ASSERT(mutex_owned(&mpt->mpt_lock));

	io_req = mpt->mpt_io_frame;

	pkt->pkt_cdbp = io_req->CDB.CDB32;

	/* Just the CDB length now, but other flags may be set later. */
	io_req->IoFlags = pkt->pkt_cdblen;

	/*
	 * Set up the sense buffer. The DMA memory was set up to hold the whole
	 * ARQ structure, aligned so that its sts_sensedata is aligned to 64
	 * bytes. Point SenseBufferLowAddress to sts_sensedata and reduce the
	 * length accordingly.
	 */
	pkt->pkt_scbp = mpt->mpt_sense;
	pkt->pkt_scblen = lmrc_dma_get_size(&mpt->mpt_sense_dma) - 64 +
	    offsetof(struct scsi_arq_status, sts_sensedata);

	lmrc_dma_set_addr32(&mpt->mpt_sense_dma,
	    &io_req->SenseBufferLowAddress);
	io_req->SenseBufferLowAddress +=
	    P2ROUNDUP(offsetof(struct scsi_arq_status, sts_sensedata), 64);
	io_req->SenseBufferLength = pkt->pkt_scblen -
	    offsetof(struct scsi_arq_status, sts_sensedata);
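
	/*
	 * To illustrate with hypothetical numbers: given a 256-byte sense DMA
	 * allocation and sts_sensedata at offset 8 within struct
	 * scsi_arq_status, pkt_scblen would be 256 - 64 + 8 = 200, the sense
	 * address would be advanced by P2ROUNDUP(8, 64) = 64, and
	 * SenseBufferLength would be 200 - 8 = 192, covering the DMA memory
	 * from sts_sensedata to the end of the allocation.
	 */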

	rc = &io_req->VendorRegion;
	rc->rc_timeout = pkt->pkt_time;

	cmd->sc_mpt = mpt;
	cmd->sc_tgt = tgt;
	mpt->mpt_pkt = pkt;
	mutex_exit(&mpt->mpt_lock);
	rw_exit(&tgt->tgt_lock);

	return (0);
}

/*
 * lmrc_tran_teardown_pkt
 *
 * Return the MPT command to the free list. It'll be cleared later before
 * it is reused.
 */
static void
lmrc_tran_teardown_pkt(struct scsi_pkt *pkt)
{
	lmrc_scsa_cmd_t *cmd;
	lmrc_mpt_cmd_t *mpt;

	cmd = pkt->pkt_ha_private;
	ASSERT(cmd != NULL);

	mpt = cmd->sc_mpt;
	ASSERT(mpt != NULL);

	mutex_enter(&mpt->mpt_lock);
	lmrc_put_mpt(mpt);
}

/*
 * lmrc_hba_attach
 *
 * Set up the HBA functions of lmrc. This is a SAS controller and uses complex
 * addressing for targets, presenting physical devices (PDs) and RAID volumes
 * (LDs) as separate iports.
 */
int
lmrc_hba_attach(lmrc_t *lmrc)
{
	scsi_hba_tran_t	*tran;
	ddi_dma_attr_t tran_attr = lmrc->l_dma_attr_32;

	tran = scsi_hba_tran_alloc(lmrc->l_dip, SCSI_HBA_CANSLEEP);
	if (tran == NULL) {
		dev_err(lmrc->l_dip, CE_WARN, "!scsi_hba_tran_alloc failed");
		return (DDI_FAILURE);
	}

	tran->tran_hba_private = lmrc;

	tran->tran_tgt_init = lmrc_tran_tgt_init;
	tran->tran_tgt_free = lmrc_tran_tgt_free;

	tran->tran_tgt_probe = scsi_hba_probe;

	tran->tran_start = lmrc_tran_start;
	tran->tran_abort = lmrc_tran_abort;
	tran->tran_reset = lmrc_tran_reset;

	tran->tran_getcap = lmrc_getcap;
	tran->tran_setcap = lmrc_setcap;

	tran->tran_setup_pkt = lmrc_tran_setup_pkt;
	tran->tran_teardown_pkt = lmrc_tran_teardown_pkt;
	tran->tran_hba_len = sizeof (lmrc_scsa_cmd_t);
	tran->tran_interconnect_type = INTERCONNECT_SAS;

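	/*
	 * Data DMA is based on the 32-bit DMA attributes, amended to allow
	 * relaxed ordering (if enabled) and the controller's maximum SGL
	 * length.
	 */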
	if (lmrc_relaxed_ordering)
		tran_attr.dma_attr_flags |= DDI_DMA_RELAXED_ORDERING;
	tran_attr.dma_attr_sgllen = lmrc->l_max_num_sge;

	if (scsi_hba_attach_setup(lmrc->l_dip, &tran_attr, tran,
	    SCSI_HBA_HBA | SCSI_HBA_ADDR_COMPLEX) != DDI_SUCCESS)
		goto fail;

	lmrc->l_hba_tran = tran;

	if (scsi_hba_iport_register(lmrc->l_dip, LMRC_IPORT_RAID) !=
	    DDI_SUCCESS)
		goto fail;

	if (scsi_hba_iport_register(lmrc->l_dip, LMRC_IPORT_PHYS) !=
	    DDI_SUCCESS)
		goto fail;

	return (DDI_SUCCESS);

fail:
	dev_err(lmrc->l_dip, CE_WARN,
	    "!could not attach to SCSA framework");
	lmrc_hba_detach(lmrc);

	return (DDI_FAILURE);
}

void
lmrc_hba_detach(lmrc_t *lmrc)
{
	if (lmrc->l_hba_tran == NULL)
		return;

	(void) scsi_hba_detach(lmrc->l_dip);
	scsi_hba_tran_free(lmrc->l_hba_tran);
	lmrc->l_hba_tran = NULL;
}