xref: /netbsd/sys/dev/i2o/iopsp.c (revision 8402ad45)
1 /*	$NetBSD: iopsp.c,v 1.41 2022/05/04 07:48:34 andvar Exp $	*/
2 
3 /*-
4  * Copyright (c) 2000, 2001, 2007 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Andrew Doran.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Raw SCSI device support for I2O.  IOPs present SCSI devices individually;
34  * we group them by controlling port.
35  */
36 
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: iopsp.c,v 1.41 2022/05/04 07:48:34 andvar Exp $");
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/kernel.h>
43 #include <sys/device.h>
44 #include <sys/queue.h>
45 #include <sys/proc.h>
46 #include <sys/buf.h>
47 #include <sys/endian.h>
48 #include <sys/malloc.h>
49 #include <sys/scsiio.h>
50 
51 #include <sys/bswap.h>
52 #include <sys/bus.h>
53 
54 #include <dev/scsipi/scsi_all.h>
55 #include <dev/scsipi/scsi_disk.h>
56 #include <dev/scsipi/scsipi_all.h>
57 #include <dev/scsipi/scsiconf.h>
58 #include <dev/scsipi/scsi_message.h>
59 
60 #include <dev/i2o/i2o.h>
61 #include <dev/i2o/iopio.h>
62 #include <dev/i2o/iopvar.h>
63 #include <dev/i2o/iopspvar.h>
64 
65 static void	iopsp_adjqparam(device_t, int);
66 static void	iopsp_attach(device_t, device_t, void *);
67 static void	iopsp_intr(device_t, struct iop_msg *, void *);
68 static int	iopsp_ioctl(struct scsipi_channel *, u_long,
69 			    void *, int, struct proc *);
70 static int	iopsp_match(device_t, cfdata_t, void *);
71 static int	iopsp_rescan(struct iopsp_softc *);
72 static int	iopsp_reconfig(device_t);
73 static void	iopsp_scsipi_request(struct scsipi_channel *,
74 				     scsipi_adapter_req_t, void *);
75 
76 CFATTACH_DECL_NEW(iopsp, sizeof(struct iopsp_softc),
77     iopsp_match, iopsp_attach, NULL, NULL);
78 
79 /*
80  * Match a supported device.
81  */
82 static int
iopsp_match(device_t parent,cfdata_t match,void * aux)83 iopsp_match(device_t parent, cfdata_t match, void *aux)
84 {
85 	struct iop_attach_args *ia;
86 	struct iop_softc *iop;
87 	struct {
88 		struct	i2o_param_op_results pr;
89 		struct	i2o_param_read_results prr;
90 		struct	i2o_param_hba_ctlr_info ci;
91 	} __packed param;
92 
93 	ia = aux;
94 	iop = device_private(parent);
95 
96 	if (ia->ia_class != I2O_CLASS_BUS_ADAPTER_PORT)
97 		return (0);
98 
99 	if (iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_HBA_CTLR_INFO, &param,
100 	    sizeof(param), NULL) != 0)
101 		return (0);
102 
103 	return (param.ci.bustype == I2O_HBA_BUS_SCSI ||
104 	    param.ci.bustype == I2O_HBA_BUS_FCA);
105 }
106 
107 /*
108  * Attach a supported device.
109  */
110 static void
iopsp_attach(device_t parent,device_t self,void * aux)111 iopsp_attach(device_t parent, device_t self, void *aux)
112 {
113 	struct iop_attach_args *ia;
114 	struct iopsp_softc *sc;
115 	struct iop_softc *iop;
116 	struct {
117 		struct	i2o_param_op_results pr;
118 		struct	i2o_param_read_results prr;
119 		union {
120 			struct	i2o_param_hba_ctlr_info ci;
121 			struct	i2o_param_hba_scsi_ctlr_info sci;
122 			struct	i2o_param_hba_scsi_port_info spi;
123 		} p;
124 	} __packed param;
125 	int fc, rv;
126 	int size;
127 
128 	ia = (struct iop_attach_args *)aux;
129 	sc = device_private(self);
130 	iop = device_private(parent);
131 
132 	/* Register us as an initiator. */
133 	sc->sc_ii.ii_dv = self;
134 	sc->sc_ii.ii_intr = iopsp_intr;
135 	sc->sc_ii.ii_flags = 0;
136 	sc->sc_ii.ii_tid = ia->ia_tid;
137 	sc->sc_ii.ii_reconfig = iopsp_reconfig;
138 	sc->sc_ii.ii_adjqparam = iopsp_adjqparam;
139 	iop_initiator_register(iop, &sc->sc_ii);
140 
141 	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_HBA_CTLR_INFO,
142 	    &param, sizeof(param), NULL);
143 	if (rv != 0)
144 		goto bad;
145 
146 	fc = (param.p.ci.bustype == I2O_HBA_BUS_FCA);
147 
148 	/*
149 	 * Say what the device is.  If we can find out what the controlling
150 	 * device is, say what that is too.
151 	 */
152 	aprint_normal(": SCSI port");
153 	iop_print_ident(iop, ia->ia_tid);
154 	aprint_normal("\n");
155 
156 	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_HBA_SCSI_CTLR_INFO,
157 	    &param, sizeof(param), NULL);
158 	if (rv != 0)
159 		goto bad;
160 
161 	aprint_normal_dev(sc->sc_dev, "");
162 	if (fc)
163 		aprint_normal("FC");
164 	else
165 		aprint_normal("%d-bit", param.p.sci.maxdatawidth);
166 	aprint_normal(", max sync rate %dMHz, initiator ID %d\n",
167 	    (u_int32_t)le64toh(param.p.sci.maxsyncrate) / 1000,
168 	    le32toh(param.p.sci.initiatorid));
169 
170 	sc->sc_openings = 1;
171 
172 	sc->sc_adapter.adapt_dev = sc->sc_dev;
173 	sc->sc_adapter.adapt_nchannels = 1;
174 	sc->sc_adapter.adapt_openings = 1;
175 	sc->sc_adapter.adapt_max_periph = 1;
176 	sc->sc_adapter.adapt_ioctl = iopsp_ioctl;
177 	sc->sc_adapter.adapt_minphys = minphys;
178 	sc->sc_adapter.adapt_request = iopsp_scsipi_request;
179 
180 	memset(&sc->sc_channel, 0, sizeof(sc->sc_channel));
181 	sc->sc_channel.chan_adapter = &sc->sc_adapter;
182 	sc->sc_channel.chan_bustype = &scsi_bustype;
183 	sc->sc_channel.chan_channel = 0;
184 	sc->sc_channel.chan_ntargets = fc ?
185 	    IOPSP_MAX_FC_TARGET : param.p.sci.maxdatawidth;
186 	sc->sc_channel.chan_nluns = IOPSP_MAX_LUN;
187 	sc->sc_channel.chan_id = le32toh(param.p.sci.initiatorid);
188 	sc->sc_channel.chan_flags = SCSIPI_CHAN_NOSETTLE;
189 
190 	/*
191 	 * Allocate the target map.  Currently used for informational
192 	 * purposes only.
193 	 */
194 	size = sc->sc_channel.chan_ntargets * sizeof(struct iopsp_target);
195 	sc->sc_targetmap = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
196 
197  	/* Build the two maps, and attach to scsipi. */
198 	if (iopsp_reconfig(self) != 0) {
199 		aprint_error_dev(sc->sc_dev, "configure failed\n");
200 		goto bad;
201 	}
202 	config_found(self, &sc->sc_channel, scsiprint, CFARGS_NONE);
203 	return;
204 
205  bad:
206 	iop_initiator_unregister(iop, &sc->sc_ii);
207 }
208 
/*
 * Scan the LCT to determine which devices we control, and enter them into
 * the maps.
 *
 * Called with the IOP configuration lock held.  Returns 0 on success or
 * ENOMEM if the new target/LUN map cannot be allocated.
 */
static int
iopsp_reconfig(device_t dv)
{
	struct iopsp_softc *sc;
	struct iop_softc *iop;
	struct i2o_lct_entry *le;
	struct scsipi_channel *sc_chan;
	struct {
		struct	i2o_param_op_results pr;
		struct	i2o_param_read_results prr;
		struct	i2o_param_scsi_device_info sdi;
	} __packed param;
	u_int tid, nent, i, targ, lun, size, rv, bptid;
	u_short *tidmap;
	void *tofree;
	struct iopsp_target *it;
	int syncrate;

	sc = device_private(dv);
	iop = device_private(device_parent(sc->sc_dev));
	sc_chan = &sc->sc_channel;

	KASSERT(mutex_owned(&iop->sc_conflock));

	/* Anything to do?  Skip the scan if the IOP's LCT hasn't changed. */
	if (iop->sc_chgind == sc->sc_chgind)
		return (0);

	/*
	 * Allocate memory for the target/LUN -> TID map.  Use zero to
	 * denote absent targets (zero is the TID of the I2O executive,
	 * and we never address that here).
	 */
	size = sc_chan->chan_ntargets * (IOPSP_MAX_LUN) * sizeof(u_short);
	if ((tidmap = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO)) == NULL)
		return (ENOMEM);

	/* Mark all targets absent; the scan below re-flags live ones. */
	for (i = 0; i < sc_chan->chan_ntargets; i++)
		sc->sc_targetmap[i].it_flags &= ~IT_PRESENT;

	/*
	 * A quick hack to handle Intel's stacked bus port arrangement:
	 * if another bus adapter port lists our TID as its user, then the
	 * peripherals actually hang off that inner port's TID instead.
	 * (TIDs are 12-bit fields, hence the "& 4095" masks throughout.)
	 */
	bptid = sc->sc_ii.ii_tid;
	nent = iop->sc_nlctent;
	for (le = iop->sc_lct->entry; nent != 0; nent--, le++)
		if ((le16toh(le->classid) & 4095) ==
		    I2O_CLASS_BUS_ADAPTER_PORT &&
		    (le32toh(le->usertid) & 4095) == bptid) {
			bptid = le16toh(le->localtid) & 4095;
			break;
		}

	/* Walk the LCT looking for SCSI peripherals parented on our port. */
	nent = iop->sc_nlctent;
	for (i = 0, le = iop->sc_lct->entry; i < nent; i++, le++) {
		if ((le16toh(le->classid) & 4095) != I2O_CLASS_SCSI_PERIPHERAL)
			continue;
		if (((le32toh(le->usertid) >> 12) & 4095) != bptid)
			continue;
		tid = le16toh(le->localtid) & 4095;

		/* Fetch the device's negotiated SCSI parameters. */
		rv = iop_field_get_all(iop, tid, I2O_PARAM_SCSI_DEVICE_INFO,
		    &param, sizeof(param), NULL);
		if (rv != 0)
			continue;
		targ = le32toh(param.sdi.identifier);
		/* NOTE(review): LUN taken from luninfo[1] — second byte of
		 * the LUN descriptor; confirm against the I2O SCSI spec. */
		lun = param.sdi.luninfo[1];
#if defined(DIAGNOSTIC) || defined(I2ODEBUG)
		if (targ >= sc_chan->chan_ntargets ||
		    lun >= sc_chan->chan_nluns) {
			aprint_error_dev(sc->sc_dev, "target %d,%d (tid %d): "
			    "bad target/LUN\n", targ, lun, tid);
			continue;
		}
#endif

		/*
		 * If we've already described this target, and nothing has
		 * changed, then don't describe it again.
		 */
		it = &sc->sc_targetmap[targ];
		it->it_flags |= IT_PRESENT;
		/* negsyncrate is in kHz; round to the nearest MHz. */
		syncrate = ((int)le64toh(param.sdi.negsyncrate) + 500) / 1000;
		if (it->it_width != param.sdi.negdatawidth ||
		    it->it_offset != param.sdi.negoffset ||
		    it->it_syncrate != syncrate) {
			it->it_width = param.sdi.negdatawidth;
			it->it_offset = param.sdi.negoffset;
			it->it_syncrate = syncrate;

			aprint_verbose_dev(sc->sc_dev, "target %d (tid %d): %d-bit, ",
			    targ, tid, it->it_width);
			if (it->it_syncrate == 0)
				aprint_verbose("asynchronous\n");
			else
				aprint_verbose("synchronous at %dMHz, "
				    "offset 0x%x\n", it->it_syncrate,
				    it->it_offset);
		}

		/* Ignore the device if it's in use by somebody else. */
		if ((le32toh(le->usertid) & 4095) != I2O_TID_NONE) {
			/* Announce only on first transition to in-use. */
			if (sc->sc_tidmap == NULL ||
			    IOPSP_TIDMAP(sc->sc_tidmap, targ, lun) !=
			    IOPSP_TID_INUSE) {
				aprint_verbose_dev(sc->sc_dev, "target %d,%d (tid %d): "
				    "in use by tid %d\n",
				    targ, lun, tid,
				    le32toh(le->usertid) & 4095);
			}
			IOPSP_TIDMAP(tidmap, targ, lun) = IOPSP_TID_INUSE;
		} else
			IOPSP_TIDMAP(tidmap, targ, lun) = (u_short)tid;
	}

	/* Forget negotiated width for targets that have disappeared. */
	for (i = 0; i < sc_chan->chan_ntargets; i++)
		if ((sc->sc_targetmap[i].it_flags & IT_PRESENT) == 0)
			sc->sc_targetmap[i].it_width = 0;

	/*
	 * Swap in the new map and return.  The interrupt lock keeps
	 * iopsp_scsipi_request() from reading the map mid-swap.
	 */
	mutex_spin_enter(&iop->sc_intrlock);
	tofree = sc->sc_tidmap;
	sc->sc_tidmap = tidmap;
	mutex_spin_exit(&iop->sc_intrlock);

	if (tofree != NULL)
		free(tofree, M_DEVBUF);
	sc->sc_chgind = iop->sc_chgind;
	return (0);
}
343 
/*
 * Re-scan the bus; to be called from a higher level (e.g. scsipi).
 *
 * Posts an HBA_BUS_SCAN to the port, then refreshes the LCT and rebuilds
 * the target maps.  Returns 0 on success or an error from iop_lct_get()
 * or iopsp_reconfig().
 */
static int
iopsp_rescan(struct iopsp_softc *sc)
{
	struct iop_softc *iop;
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	int rv;

	iop = device_private(device_parent(sc->sc_dev));

	/* iopsp_reconfig() below requires the configuration lock. */
	mutex_enter(&iop->sc_conflock);
	im = iop_msg_alloc(iop, IM_WAIT);

	mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
	mf.msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_HBA_BUS_SCAN);
	mf.msgictx = sc->sc_ii.ii_ictx;
	mf.msgtctx = im->im_tctx;

	/* A full bus scan can be slow; allow up to five minutes. */
	rv = iop_msg_post(iop, im, &mf, 5*60*1000);
	iop_msg_free(iop, im);
	if (rv != 0)
		aprint_error_dev(sc->sc_dev, "bus rescan failed (error %d)\n",
		    rv);

	/* Even if the scan failed, refresh our view of the LCT. */
	if ((rv = iop_lct_get(iop)) == 0)
		rv = iopsp_reconfig(sc->sc_dev);

	mutex_exit(&iop->sc_conflock);
	return (rv);
}
377 
/*
 * Start a SCSI command.
 *
 * scsipi adapter request hook: translates an xfer into an
 * I2O_SCSI_SCB_EXEC message and posts it to the IOP.  Completion is
 * reported asynchronously through iopsp_intr().
 */
static void
iopsp_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
		     void *arg)
{
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct iopsp_softc *sc;
	struct iop_msg *im;
	struct iop_softc *iop;
	struct i2o_scsi_scb_exec *mf;
	int error, flags, tid;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = device_private(chan->chan_adapter->adapt_dev);
	iop = device_private(device_parent(sc->sc_dev));

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		flags = xs->xs_control;

		SC_DEBUG(periph, SCSIPI_DB2, ("iopsp_scsi_request run_xfer\n"));

		/* Look up the TID for this target/LUN. */
		tid = IOPSP_TIDMAP(sc->sc_tidmap, periph->periph_target,
		    periph->periph_lun);
		/* Absent or foreign-owned devices look like selection
		 * timeouts to scsipi. */
		if (tid == IOPSP_TID_ABSENT || tid == IOPSP_TID_INUSE) {
			xs->error = XS_SELTIMEOUT;
			scsipi_done(xs);
			return;
		}

		/* Need to reset the target? */
		if ((flags & XS_CTL_RESET) != 0) {
			if (iop_simple_cmd(iop, tid, I2O_SCSI_DEVICE_RESET,
			    sc->sc_ii.ii_ictx, 1, 30*1000) != 0) {
				aprint_error_dev(sc->sc_dev, "reset failed\n");
				xs->error = XS_DRIVER_STUFFUP;
			} else
				xs->error = XS_NOERROR;

			scsipi_done(xs);
			return;
		}

#if defined(I2ODEBUG) || defined(SCSIDEBUG)
		if (xs->cmdlen > sizeof(mf->cdb))
			panic("%s: CDB too large", device_xname(sc->sc_dev));
#endif

		/* Polled transfers must poll for the reply too. */
		im = iop_msg_alloc(iop, IM_POLL_INTR |
		    IM_NOSTATUS | ((flags & XS_CTL_POLL) != 0 ? IM_POLL : 0));
		im->im_dvcontext = xs;

		/* Build the SCB_EXEC message in the on-stack frame. */
		mf = (struct i2o_scsi_scb_exec *)mb;
		mf->msgflags = I2O_MSGFLAGS(i2o_scsi_scb_exec);
		mf->msgfunc = I2O_MSGFUNC(tid, I2O_SCSI_SCB_EXEC);
		mf->msgictx = sc->sc_ii.ii_ictx;
		mf->msgtctx = im->im_tctx;
		/* The CDB length shares the flags field's low bits. */
		mf->flags = xs->cmdlen | I2O_SCB_FLAG_ENABLE_DISCONNECT |
		    I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
		mf->datalen = xs->datalen;
		memcpy(mf->cdb, xs->cmd, xs->cmdlen);

		/* Map the scsipi tag type onto the I2O queue-tag flags. */
		switch (xs->xs_tag_type) {
		case MSG_ORDERED_Q_TAG:
			mf->flags |= I2O_SCB_FLAG_ORDERED_QUEUE_TAG;
			break;
		case MSG_SIMPLE_Q_TAG:
			mf->flags |= I2O_SCB_FLAG_SIMPLE_QUEUE_TAG;
			break;
		case MSG_HEAD_OF_Q_TAG:
			mf->flags |= I2O_SCB_FLAG_HEAD_QUEUE_TAG;
			break;
		default:
			break;
		}

		/* Map the data buffer and set the transfer direction. */
		if (xs->datalen != 0) {
			error = iop_msg_map_bio(iop, im, mb, xs->data,
			    xs->datalen, (flags & XS_CTL_DATA_OUT) == 0);
			if (error) {
				xs->error = XS_DRIVER_STUFFUP;
				iop_msg_free(iop, im);
				scsipi_done(xs);
				return;
			}
			if ((flags & XS_CTL_DATA_IN) == 0)
				mf->flags |= I2O_SCB_FLAG_XFER_TO_DEVICE;
			else
				mf->flags |= I2O_SCB_FLAG_XFER_FROM_DEVICE;
		}

		/* On post failure, unwind the mapping and wrapper here;
		 * on success iopsp_intr() releases them. */
		if (iop_msg_post(iop, im, mb, xs->timeout)) {
			if (xs->datalen != 0)
				iop_msg_unmap(iop, im);
			iop_msg_free(iop, im);
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
		}
		break;

	case ADAPTER_REQ_GROW_RESOURCES:
		/*
		 * Not supported.
		 */
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
		/*
		 * The DDM takes care of this, and we can't modify its
		 * behaviour.
		 */
		break;
	}
}
497 
#ifdef notyet
/*
 * Abort the specified I2O_SCSI_SCB_EXEC message and its associated SCB.
 *
 * Polls for completion of the abort request (up to 30 seconds) and
 * returns zero on success or an error from iop_msg_post().
 */
static int
iopsp_scsi_abort(struct iopsp_softc *sc, int atid, struct iop_msg *aim)
{
	struct iop_msg *im;
	struct i2o_scsi_scb_abort mf;
	struct iop_softc *iop;
	int rv;

	iop = device_private(device_parent(sc->sc_dev));
	im = iop_msg_alloc(iop, IM_POLL);

	mf.msgflags = I2O_MSGFLAGS(i2o_scsi_scb_abort);
	mf.msgfunc = I2O_MSGFUNC(atid, I2O_SCSI_SCB_ABORT);
	mf.msgictx = sc->sc_ii.ii_ictx;
	mf.msgtctx = im->im_tctx;
	/* Identify the outstanding message to abort by its context. */
	mf.tctxabort = aim->im_tctx;

	rv = iop_msg_post(iop, im, &mf, 30000);
	iop_msg_free(iop, im);

	return (rv);
}
#endif
525 
/*
 * We have a message which has been processed and replied to by the IOP -
 * deal with it.
 *
 * Translates the I2O reply (HBA status, SCSI status, sense data, residual)
 * into scsipi terms, frees the message wrapper and completes the xfer.
 */
static void
iopsp_intr(device_t dv, struct iop_msg *im, void *reply)
{
	struct scsipi_xfer *xs;
	struct iopsp_softc *sc;
	struct i2o_scsi_reply *rb;
	struct iop_softc *iop;
	u_int sl;

	sc = device_private(dv);
	/* The xfer was stashed in the wrapper by iopsp_scsipi_request(). */
	xs = im->im_dvcontext;
	iop = device_private(device_parent(dv));
	rb = reply;

	SC_DEBUG(xs->xs_periph, SCSIPI_DB2, ("iopsp_intr\n"));

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
		/* Transport-level failure: nothing was transferred. */
		xs->error = XS_DRIVER_STUFFUP;
		xs->resid = xs->datalen;
	} else {
		if (rb->hbastatus != I2O_SCSI_DSC_SUCCESS) {
			/* Map HBA-detected conditions to scsipi errors. */
			switch (rb->hbastatus) {
			case I2O_SCSI_DSC_ADAPTER_BUSY:
			case I2O_SCSI_DSC_SCSI_BUS_RESET:
			case I2O_SCSI_DSC_BUS_BUSY:
				xs->error = XS_BUSY;
				break;
			case I2O_SCSI_DSC_SELECTION_TIMEOUT:
				xs->error = XS_SELTIMEOUT;
				break;
			case I2O_SCSI_DSC_COMMAND_TIMEOUT:
			case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
			case I2O_SCSI_DSC_LUN_INVALID:
			case I2O_SCSI_DSC_SCSI_TID_INVALID:
				xs->error = XS_TIMEOUT;
				break;
			default:
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
			aprint_error_dev(sc->sc_dev, "HBA status 0x%02x\n",
			    rb->hbastatus);
		} else if (rb->scsistatus != SCSI_OK) {
			/* Device-level status: pass sense data upward. */
			switch (rb->scsistatus) {
			case SCSI_CHECK:
				xs->error = XS_SENSE;
				sl = le32toh(rb->senselen);
				/* Clamp to the size of scsipi's sense buf. */
				if (sl > sizeof(xs->sense.scsi_sense))
					sl = sizeof(xs->sense.scsi_sense);
				memcpy(&xs->sense.scsi_sense, rb->sense, sl);
				break;
			case SCSI_QUEUE_FULL:
			case SCSI_BUSY:
				xs->error = XS_BUSY;
				break;
			default:
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else
			xs->error = XS_NOERROR;

		xs->resid = xs->datalen - le32toh(rb->datalen);
		xs->status = rb->scsistatus;
	}

	/* Free the message wrapper and pass the news to scsipi. */
	if (xs->datalen != 0)
		iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);

	scsipi_done(xs);
}
603 
604 /*
605  * ioctl hook; used here only to initiate low-level rescans.
606  */
607 static int
iopsp_ioctl(struct scsipi_channel * chan,u_long cmd,void * data,int flag,struct proc * p)608 iopsp_ioctl(struct scsipi_channel *chan, u_long cmd, void *data,
609     int flag, struct proc *p)
610 {
611 	int rv;
612 
613 	switch (cmd) {
614 	case SCBUSIOLLSCAN:
615 		/*
616 		 * If it's boot time, the bus will have been scanned and the
617 		 * maps built.  Locking would stop re-configuration, but we
618 		 * want to fake success.
619 		 */
620 		if (curlwp != &lwp0)
621 			rv = iopsp_rescan(
622 			   device_private(chan->chan_adapter->adapt_dev));
623 		else
624 			rv = 0;
625 		break;
626 
627 	default:
628 		rv = ENOTTY;
629 		break;
630 	}
631 
632 	return (rv);
633 }
634 
635 /*
636  * The number of openings available to us has changed, so inform scsipi.
637  */
638 static void
iopsp_adjqparam(device_t dv,int mpi)639 iopsp_adjqparam(device_t dv, int mpi)
640 {
641 	struct iopsp_softc *sc;
642 	struct iop_softc *iop;
643 
644 	sc = device_private(dv);
645 	iop = device_private(device_parent(dv));
646 
647 	mutex_spin_enter(&iop->sc_intrlock);
648 	sc->sc_adapter.adapt_openings += mpi - sc->sc_openings;
649 	sc->sc_openings = mpi;
650 	mutex_spin_exit(&iop->sc_intrlock);
651 }
652