/*	$NetBSD: mly.c,v 1.50 2016/07/07 06:55:41 msaitoh Exp $	*/

/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, Thor Lancelot Simon, and Eric Haszlakiewicz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2000, 2001 Michael Smith
 * Copyright (c) 2000 BSDi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from FreeBSD: mly.c,v 1.8 2001/07/14 00:12:22 msmith Exp
 */

/*
 * Driver for the Mylex AcceleRAID and eXtremeRAID family with v6 firmware.
 *
 * TODO:
 *
 * o Make mly->mly_btl a hash, then MLY_BTL_RESCAN becomes a SIMPLEQ.
 * o Handle FC and multiple LUNs.
 * o Fix mmbox usage.
 * o Fix transfer speed fudge.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mly.c,v 1.50 2016/07/07 06:55:41 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/scsiio.h>
#include <sys/kthread.h>
#include <sys/kauth.h>

#include <sys/bus.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/mlyreg.h>
#include <dev/pci/mlyio.h>
#include <dev/pci/mlyvar.h>
#include <dev/pci/mly_tables.h>

static void	mly_attach(device_t, device_t, void *);
static int	mly_match(device_t, cfdata_t, void *);
static const	struct mly_ident *mly_find_ident(struct pci_attach_args *);
static int	mly_fwhandshake(struct mly_softc *);
static int	mly_flush(struct mly_softc *);
static int	mly_intr(void *);
static void	mly_shutdown(void *);

static int	mly_alloc_ccbs(struct mly_softc *);
static void	mly_check_event(struct mly_softc *);
static void	mly_complete_event(struct mly_softc *, struct mly_ccb *);
static void	mly_complete_rescan(struct mly_softc *, struct mly_ccb *);
static int	mly_dmamem_alloc(struct mly_softc *, int, bus_dmamap_t *,
				 void **, bus_addr_t *, bus_dma_segment_t *);
static void	mly_dmamem_free(struct mly_softc *, int, bus_dmamap_t,
				void *, bus_dma_segment_t *);
static int	mly_enable_mmbox(struct mly_softc *);
static void	mly_fetch_event(struct mly_softc *);
static int	mly_get_controllerinfo(struct mly_softc *);
static int	mly_get_eventstatus(struct mly_softc *);
static int	mly_ioctl(struct mly_softc *, struct mly_cmd_ioctl *,
			  void **, size_t, void *, size_t *);
static void	mly_padstr(char *, const char *, int);
static void	mly_process_event(struct mly_softc *, struct mly_event *);
static void	mly_release_ccbs(struct mly_softc *);
static int	mly_scan_btl(struct mly_softc *, int, int);
static void	mly_scan_channel(struct mly_softc *, int);
static void	mly_thread(void *);

static int	mly_ccb_alloc(struct mly_softc *, struct mly_ccb **);
static void	mly_ccb_complete(struct mly_softc *, struct mly_ccb *);
static void	mly_ccb_enqueue(struct mly_softc *, struct mly_ccb *);
static void	mly_ccb_free(struct mly_softc *, struct mly_ccb *);
static int	mly_ccb_map(struct mly_softc *, struct mly_ccb *);
static int	mly_ccb_poll(struct mly_softc *, struct mly_ccb *, int);
static int	mly_ccb_submit(struct mly_softc *, struct mly_ccb *);
static void	mly_ccb_unmap(struct mly_softc *, struct mly_ccb *);
static int	mly_ccb_wait(struct mly_softc *, struct mly_ccb *, int);

static void	mly_get_xfer_mode(struct mly_softc *, int,
				  struct scsipi_xfer_mode *);
static void	mly_scsipi_complete(struct mly_softc *, struct mly_ccb *);
static int	mly_scsipi_ioctl(struct scsipi_channel *, u_long, void *,
				 int, struct proc *);
static void	mly_scsipi_minphys(struct buf *);
static void	mly_scsipi_request(struct scsipi_channel *,
				   scsipi_adapter_req_t, void *);

static int	mly_user_command(struct mly_softc *, struct mly_user_command *);
static int	mly_user_health(struct mly_softc *, struct mly_user_health *);

extern struct	cfdriver mly_cd;

CFATTACH_DECL_NEW(mly, sizeof(struct mly_softc),
    mly_match, mly_attach, NULL, NULL);

dev_type_open(mlyopen);
dev_type_close(mlyclose);
dev_type_ioctl(mlyioctl);

const struct cdevsw mly_cdevsw = {
	.d_open = mlyopen,
	.d_close = mlyclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = mlyioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER
};

static struct mly_ident {
	u_short	vendor;
	u_short	product;
	u_short	subvendor;
	u_short	subproduct;
	int	hwif;
	const char	*desc;
} const mly_ident[] = {
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_EXTREMERAID,
		PCI_VENDOR_MYLEX,
		0x0040,
		MLY_HWIF_STRONGARM,
		"eXtremeRAID 2000"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_EXTREMERAID,
		PCI_VENDOR_MYLEX,
		0x0030,
		MLY_HWIF_STRONGARM,
		"eXtremeRAID 3000"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_ACCELERAID,
		PCI_VENDOR_MYLEX,
		0x0050,
		MLY_HWIF_I960RX,
		"AcceleRAID 352"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_ACCELERAID,
		PCI_VENDOR_MYLEX,
		0x0052,
		MLY_HWIF_I960RX,
		"AcceleRAID 170"
	},
	{
		PCI_VENDOR_MYLEX,
		PCI_PRODUCT_MYLEX_ACCELERAID,
		PCI_VENDOR_MYLEX,
		0x0054,
		MLY_HWIF_I960RX,
		"AcceleRAID 160"
	},
};

static void	*mly_sdh;

/*
 * Try to find a `mly_ident' entry corresponding to this board.
 */
static const struct mly_ident *
mly_find_ident(struct pci_attach_args *pa)
{
	const struct mly_ident *mpi, *maxmpi;
	pcireg_t reg;

	mpi = mly_ident;
	maxmpi = mpi + sizeof(mly_ident) / sizeof(mly_ident[0]);

	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_I2O)
		return (NULL);

	for (; mpi < maxmpi; mpi++) {
		if (PCI_VENDOR(pa->pa_id) != mpi->vendor ||
		    PCI_PRODUCT(pa->pa_id) != mpi->product)
			continue;

		if (mpi->subvendor == 0x0000)
			return (mpi);

		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

		if (PCI_VENDOR(reg) == mpi->subvendor &&
		    PCI_PRODUCT(reg) == mpi->subproduct)
			return (mpi);
	}

	return (NULL);
}

/*
 * Match a supported board.
 */
static int
mly_match(device_t parent, cfdata_t cfdata, void *aux)
{

	return (mly_find_ident(aux) != NULL);
}

/*
 * Attach a supported board.
 */
static void
mly_attach(device_t parent, device_t self, void *aux)
{
	struct pci_attach_args *pa;
	struct mly_softc *mly;
	struct mly_ioctl_getcontrollerinfo *mi;
	const struct mly_ident *ident;
	pci_chipset_tag_t pc;
	pci_intr_handle_t ih;
	bus_space_handle_t memh, ioh;
	bus_space_tag_t memt, iot;
	pcireg_t reg;
	const char *intrstr;
	int ior, memr, i, rv, state;
	struct scsipi_adapter *adapt;
	struct scsipi_channel *chan;
	char intrbuf[PCI_INTRSTR_LEN];

	mly = device_private(self);
	mly->mly_dv = self;
	pa = aux;
	pc = pa->pa_pc;
	ident = mly_find_ident(pa);
	state = 0;

	mly->mly_dmat = pa->pa_dmat;
	mly->mly_hwif = ident->hwif;

	printf(": Mylex %s\n", ident->desc);

	/*
	 * Map the PCI register window.
	 */
	memr = -1;
	ior = -1;

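	/*
	 * Probe BARs 0x10 and 0x14, remembering the first I/O and the
	 * first memory mapping found; memory space is preferred below.
	 */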
	for (i = 0x10; i <= 0x14; i += 4) {
		reg = pci_conf_read(pa->pa_pc, pa->pa_tag, i);

		if (PCI_MAPREG_TYPE(reg) == PCI_MAPREG_TYPE_IO) {
			if (ior == -1 && PCI_MAPREG_IO_SIZE(reg) != 0)
				ior = i;
		} else {
			if (memr == -1 && PCI_MAPREG_MEM_SIZE(reg) != 0)
				memr = i;
		}
	}

	if (memr != -1)
		if (pci_mapreg_map(pa, memr, PCI_MAPREG_TYPE_MEM, 0,
		    &memt, &memh, NULL, NULL))
			memr = -1;
	if (ior != -1)
		if (pci_mapreg_map(pa, ior, PCI_MAPREG_TYPE_IO, 0,
		    &iot, &ioh, NULL, NULL))
			ior = -1;

	if (memr != -1) {
		mly->mly_iot = memt;
		mly->mly_ioh = memh;
	} else if (ior != -1) {
		mly->mly_iot = iot;
		mly->mly_ioh = ioh;
	} else {
		aprint_error_dev(self, "can't map i/o or memory space\n");
		return;
	}

	/*
	 * Enable the device.
	 */
	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    reg | PCI_COMMAND_MASTER_ENABLE);

	/*
	 * Map and establish the interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "can't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	mly->mly_ih = pci_intr_establish(pc, ih, IPL_BIO, mly_intr, mly);
	if (mly->mly_ih == NULL) {
		aprint_error_dev(self, "can't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}

	if (intrstr != NULL)
		aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	/*
	 * Take care of interface-specific tasks.
	 */
	switch (mly->mly_hwif) {
	case MLY_HWIF_I960RX:
		mly->mly_doorbell_true = 0x00;
		mly->mly_cmd_mailbox = MLY_I960RX_COMMAND_MAILBOX;
		mly->mly_status_mailbox = MLY_I960RX_STATUS_MAILBOX;
		mly->mly_idbr = MLY_I960RX_IDBR;
		mly->mly_odbr = MLY_I960RX_ODBR;
		mly->mly_error_status = MLY_I960RX_ERROR_STATUS;
		mly->mly_interrupt_status = MLY_I960RX_INTERRUPT_STATUS;
		mly->mly_interrupt_mask = MLY_I960RX_INTERRUPT_MASK;
		break;

	case MLY_HWIF_STRONGARM:
		mly->mly_doorbell_true = 0xff;
		mly->mly_cmd_mailbox = MLY_STRONGARM_COMMAND_MAILBOX;
		mly->mly_status_mailbox = MLY_STRONGARM_STATUS_MAILBOX;
		mly->mly_idbr = MLY_STRONGARM_IDBR;
		mly->mly_odbr = MLY_STRONGARM_ODBR;
		mly->mly_error_status = MLY_STRONGARM_ERROR_STATUS;
		mly->mly_interrupt_status = MLY_STRONGARM_INTERRUPT_STATUS;
		mly->mly_interrupt_mask = MLY_STRONGARM_INTERRUPT_MASK;
		break;
	}

	/*
	 * Allocate and map the scatter/gather lists.
	 */
	rv = mly_dmamem_alloc(mly, MLY_SGL_SIZE * MLY_MAX_CCBS,
	    &mly->mly_sg_dmamap, (void **)&mly->mly_sg,
	    &mly->mly_sg_busaddr, &mly->mly_sg_seg);
	if (rv) {
		printf("%s: unable to allocate S/G maps\n",
		    device_xname(self));
		goto bad;
	}
	state++;

	/*
	 * Allocate and map the memory mailbox.
	 */
	rv = mly_dmamem_alloc(mly, sizeof(struct mly_mmbox),
	    &mly->mly_mmbox_dmamap, (void **)&mly->mly_mmbox,
	    &mly->mly_mmbox_busaddr, &mly->mly_mmbox_seg);
	if (rv) {
		aprint_error_dev(self, "unable to allocate mailboxes\n");
		goto bad;
	}
	state++;

	/*
	 * Initialise per-controller queues.
	 */
	SLIST_INIT(&mly->mly_ccb_free);
	SIMPLEQ_INIT(&mly->mly_ccb_queue);

	/*
	 * Disable interrupts before we start talking to the controller.
	 */
	mly_outb(mly, mly->mly_interrupt_mask, MLY_INTERRUPT_MASK_DISABLE);

	/*
	 * Wait for the controller to come ready, handshaking with the
	 * firmware if required.  This is typically only necessary on
	 * platforms where the controller BIOS does not run.
	 */
	if (mly_fwhandshake(mly)) {
		aprint_error_dev(self, "unable to bring controller online\n");
		goto bad;
	}

	/*
	 * Allocate initial command buffers, obtain controller feature
	 * information, and then reallocate command buffers, since we'll
	 * know how many we want.
	 */
	if (mly_alloc_ccbs(mly)) {
		aprint_error_dev(self, "unable to allocate CCBs\n");
		goto bad;
	}
	state++;
	if (mly_get_controllerinfo(mly)) {
		aprint_error_dev(self, "unable to retrieve controller info\n");
		goto bad;
	}
	mly_release_ccbs(mly);
	if (mly_alloc_ccbs(mly)) {
		aprint_error_dev(self, "unable to allocate CCBs\n");
		state--;
		goto bad;
	}

	/*
	 * Get the current event counter for health purposes, populate the
	 * initial health status buffer.
	 */
	if (mly_get_eventstatus(mly)) {
		aprint_error_dev(self, "unable to retrieve event status\n");
		goto bad;
	}

	/*
	 * Enable memory-mailbox mode.
	 */
	if (mly_enable_mmbox(mly)) {
		aprint_error_dev(self, "unable to enable memory mailbox\n");
		goto bad;
	}

	/*
	 * Print a little information about the controller.
	 */
	mi = mly->mly_controllerinfo;

	printf("%s: %d physical channel%s, firmware %d.%02d-%d-%02d "
	    "(%02d%02d%02d%02d), %dMB RAM\n", device_xname(self),
	    mi->physical_channels_present,
	    mi->physical_channels_present != 1 ? "s" : "",
	    mi->fw_major, mi->fw_minor, mi->fw_turn, mi->fw_build,
	    mi->fw_century, mi->fw_year, mi->fw_month, mi->fw_day,
	    le16toh(mi->memory_size));

	/*
	 * Register our `shutdownhook'.
	 */
	if (mly_sdh == NULL)
		mly_sdh = shutdownhook_establish(mly_shutdown, NULL);

	/*
	 * Clear any previous BTL information.  For each bus that scsipi
	 * wants to scan, we'll receive the SCBUSIOLLSCAN ioctl and retrieve
	 * all BTL info at that point.
	 */
	memset(&mly->mly_btl, 0, sizeof(mly->mly_btl));

	mly->mly_nchans = mly->mly_controllerinfo->physical_channels_present +
	    mly->mly_controllerinfo->virtual_channels_present;

	/*
	 * Attach to scsipi.
	 */
	adapt = &mly->mly_adapt;
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = self;
	adapt->adapt_nchannels = mly->mly_nchans;
	adapt->adapt_openings = mly->mly_ncmds - MLY_CCBS_RESV;
	adapt->adapt_max_periph = mly->mly_ncmds - MLY_CCBS_RESV;
	adapt->adapt_request = mly_scsipi_request;
	adapt->adapt_minphys = mly_scsipi_minphys;
	adapt->adapt_ioctl = mly_scsipi_ioctl;

	for (i = 0; i < mly->mly_nchans; i++) {
		chan = &mly->mly_chans[i];
		memset(chan, 0, sizeof(*chan));
		chan->chan_adapter = adapt;
		chan->chan_bustype = &scsi_bustype;
		chan->chan_channel = i;
		chan->chan_ntargets = MLY_MAX_TARGETS;
		chan->chan_nluns = MLY_MAX_LUNS;
		chan->chan_id = mly->mly_controllerparam->initiator_id;
		chan->chan_flags = SCSIPI_CHAN_NOSETTLE;
		config_found(self, chan, scsiprint);
	}

	/*
	 * Now enable interrupts...
	 */
	mly_outb(mly, mly->mly_interrupt_mask, MLY_INTERRUPT_MASK_ENABLE);

	/*
	 * Finally, create our monitoring thread.
	 */
	mly->mly_state |= MLY_STATE_INITOK;
	rv = kthread_create(PRI_NONE, 0, NULL, mly_thread, mly,
	    &mly->mly_thread, "%s", device_xname(self));
	if (rv != 0)
		aprint_error_dev(self, "unable to create thread (%d)\n", rv);
	return;

 bad:
	if (state > 2)
		mly_release_ccbs(mly);
	if (state > 1)
		mly_dmamem_free(mly, sizeof(struct mly_mmbox),
		    mly->mly_mmbox_dmamap, (void *)mly->mly_mmbox,
		    &mly->mly_mmbox_seg);
	if (state > 0)
		mly_dmamem_free(mly, MLY_SGL_SIZE * MLY_MAX_CCBS,
		    mly->mly_sg_dmamap, (void *)mly->mly_sg,
		    &mly->mly_sg_seg);
}

/*
 * Scan all possible devices on the specified channel.
 */
static void
mly_scan_channel(struct mly_softc *mly, int bus)
{
	int s, target;

	for (target = 0; target < MLY_MAX_TARGETS; target++) {
		s = splbio();
		if (!mly_scan_btl(mly, bus, target)) {
			tsleep(&mly->mly_btl[bus][target], PRIBIO, "mlyscan",
			    0);
		}
		splx(s);
	}
}

/*
 * Shut down all configured `mly' devices.
 */
static void
mly_shutdown(void *cookie)
{
	struct mly_softc *mly;
	int i;

	for (i = 0; i < mly_cd.cd_ndevs; i++) {
		if ((mly = device_lookup_private(&mly_cd, i)) == NULL)
			continue;

		if (mly_flush(mly))
			aprint_error_dev(mly->mly_dv, "unable to flush cache\n");
	}
}

/*
 * Fill in the mly_controllerinfo and mly_controllerparam fields in the
 * softc.
 */
static int
mly_get_controllerinfo(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;
	int rv;

	/*
	 * Build the getcontrollerinfo ioctl and send it.
	 */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_GETCONTROLLERINFO;
	rv = mly_ioctl(mly, &mci, (void **)&mly->mly_controllerinfo,
	    sizeof(*mly->mly_controllerinfo), NULL, NULL);
	if (rv != 0)
		return (rv);

	/*
	 * Build the getcontrollerparameter ioctl and send it.
	 */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_GETCONTROLLERPARAMETER;
	rv = mly_ioctl(mly, &mci, (void **)&mly->mly_controllerparam,
	    sizeof(*mly->mly_controllerparam), NULL, NULL);

	return (rv);
}

/*
 * Rescan a device, possibly as a consequence of getting an event which
 * suggests that it may have changed.  Must be called with interrupts
 * blocked.
 */
static int
mly_scan_btl(struct mly_softc *mly, int bus, int target)
{
	struct mly_ccb *mc;
	struct mly_cmd_ioctl *mci;
	int rv;

	if (target == mly->mly_controllerparam->initiator_id) {
		mly->mly_btl[bus][target].mb_flags = MLY_BTL_PROTECTED;
		return (EIO);
	}

	/* Don't re-scan if a scan is already in progress. */
	if ((mly->mly_btl[bus][target].mb_flags & MLY_BTL_SCANNING) != 0)
		return (EBUSY);

	/* Get a command. */
	if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
		return (rv);

	/* Set up the data buffer. */
	mc->mc_data = malloc(sizeof(union mly_devinfo),
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (mc->mc_data == NULL) {
		mly_ccb_free(mly, mc);
		return (ENOMEM);
	}

	mc->mc_flags |= MLY_CCB_DATAIN;
	mc->mc_complete = mly_complete_rescan;

	/*
	 * Build the ioctl.
	 */
	mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
	mci->opcode = MDACMD_IOCTL;
	mci->timeout = 30 | MLY_TIMEOUT_SECONDS;
	memset(&mci->param, 0, sizeof(mci->param));

	if (MLY_BUS_IS_VIRTUAL(mly, bus)) {
		mc->mc_length = sizeof(struct mly_ioctl_getlogdevinfovalid);
		mci->data_size = htole32(mc->mc_length);
		mci->sub_ioctl = MDACIOCTL_GETLOGDEVINFOVALID;
		_lto3l(MLY_LOGADDR(0, MLY_LOGDEV_ID(mly, bus, target)),
		    mci->addr);
	} else {
		mc->mc_length = sizeof(struct mly_ioctl_getphysdevinfovalid);
		mci->data_size = htole32(mc->mc_length);
		mci->sub_ioctl = MDACIOCTL_GETPHYSDEVINFOVALID;
		_lto3l(MLY_PHYADDR(0, bus, target, 0), mci->addr);
	}

	/*
	 * Dispatch the command.
	 */
	if ((rv = mly_ccb_map(mly, mc)) != 0) {
		free(mc->mc_data, M_DEVBUF);
		mly_ccb_free(mly, mc);
		return (rv);
	}

	mly->mly_btl[bus][target].mb_flags |= MLY_BTL_SCANNING;
	mly_ccb_enqueue(mly, mc);
	return (0);
}

/*
 * Handle the completion of a rescan operation.
 */
static void
mly_complete_rescan(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct mly_ioctl_getlogdevinfovalid *ldi;
	struct mly_ioctl_getphysdevinfovalid *pdi;
	struct mly_cmd_ioctl *mci;
	struct mly_btl btl, *btlp;
	struct scsipi_xfer_mode xm;
	int bus, target, rescan;
	u_int tmp;

	mly_ccb_unmap(mly, mc);

	/*
	 * Recover the bus and target from the command.  We need these even
	 * in the case where we don't have a useful response.
	 */
	mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
	tmp = _3ltol(mci->addr);
	rescan = 0;

	if (mci->sub_ioctl == MDACIOCTL_GETLOGDEVINFOVALID) {
		bus = MLY_LOGDEV_BUS(mly, MLY_LOGADDR_DEV(tmp));
		target = MLY_LOGDEV_TARGET(mly, MLY_LOGADDR_DEV(tmp));
	} else {
		bus = MLY_PHYADDR_CHANNEL(tmp);
		target = MLY_PHYADDR_TARGET(tmp);
	}

	btlp = &mly->mly_btl[bus][target];

	/* The default result is 'no device'. */
	memset(&btl, 0, sizeof(btl));
	btl.mb_flags = MLY_BTL_PROTECTED;

	/* If the rescan completed OK, we have possibly-new BTL data. */
	if (mc->mc_status != 0)
		goto out;

	if (mc->mc_length == sizeof(*ldi)) {
		ldi = (struct mly_ioctl_getlogdevinfovalid *)mc->mc_data;
		tmp = le32toh(ldi->logical_device_number);

		if (MLY_LOGDEV_BUS(mly, tmp) != bus ||
		    MLY_LOGDEV_TARGET(mly, tmp) != target) {
#ifdef MLYDEBUG
			printf("%s: WARNING: BTL rescan (logical) for %d:%d "
			    "returned data for %d:%d instead\n",
			    device_xname(mly->mly_dv), bus, target,
			    MLY_LOGDEV_BUS(mly, tmp),
			    MLY_LOGDEV_TARGET(mly, tmp));
#endif
			goto out;
		}

		btl.mb_flags = MLY_BTL_LOGICAL | MLY_BTL_TQING;
		btl.mb_type = ldi->raid_level;
		btl.mb_state = ldi->state;
	} else if (mc->mc_length == sizeof(*pdi)) {
		pdi = (struct mly_ioctl_getphysdevinfovalid *)mc->mc_data;

		if (pdi->channel != bus || pdi->target != target) {
#ifdef MLYDEBUG
			printf("%s: WARNING: BTL rescan (physical) for %d:%d "
			    "returned data for %d:%d instead\n",
			    device_xname(mly->mly_dv),
			    bus, target, pdi->channel, pdi->target);
#endif
			goto out;
		}

		btl.mb_flags = MLY_BTL_PHYSICAL;
		btl.mb_type = MLY_DEVICE_TYPE_PHYSICAL;
		btl.mb_state = pdi->state;
		btl.mb_speed = pdi->speed;
		btl.mb_width = pdi->width;

		if (pdi->state != MLY_DEVICE_STATE_UNCONFIGURED)
			btl.mb_flags |= MLY_BTL_PROTECTED;
		if (pdi->command_tags != 0)
			btl.mb_flags |= MLY_BTL_TQING;
	} else {
		printf("%s: BTL rescan result invalid\n",
		    device_xname(mly->mly_dv));
		goto out;
	}

	/* Decide whether we need to rescan the device. */
	if (btl.mb_flags != btlp->mb_flags ||
	    btl.mb_speed != btlp->mb_speed ||
	    btl.mb_width != btlp->mb_width)
		rescan = 1;

 out:
	*btlp = btl;

	if (rescan && (btl.mb_flags & MLY_BTL_PROTECTED) == 0) {
		xm.xm_target = target;
		mly_get_xfer_mode(mly, bus, &xm);
		/* XXX SCSI mid-layer rescan goes here. */
	}

	/* Wake anybody waiting on the device to be rescanned. */
	wakeup(btlp);

	free(mc->mc_data, M_DEVBUF);
	mly_ccb_free(mly, mc);
}

/*
 * Get the current health status and set the 'next event' counter to suit.
 */
static int
mly_get_eventstatus(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;
	struct mly_health_status *mh;
	int rv;

	/* Build the gethealthstatus ioctl and send it. */
	memset(&mci, 0, sizeof(mci));
	mh = NULL;
	mci.sub_ioctl = MDACIOCTL_GETHEALTHSTATUS;

	rv = mly_ioctl(mly, &mci, (void *)&mh, sizeof(*mh), NULL, NULL);
	if (rv)
		return (rv);

	/* Get the event counter. */
	mly->mly_event_change = le32toh(mh->change_counter);
	mly->mly_event_waiting = le32toh(mh->next_event);
	mly->mly_event_counter = le32toh(mh->next_event);

	/* Save the health status into the memory mailbox */
	memcpy(&mly->mly_mmbox->mmm_health.status, mh, sizeof(*mh));

	bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
	    offsetof(struct mly_mmbox, mmm_health),
	    sizeof(mly->mly_mmbox->mmm_health),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	free(mh, M_DEVBUF);
	return (0);
}

/*
 * Enable memory mailbox mode.
 */
static int
mly_enable_mmbox(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;
	u_int8_t *sp;
	u_int64_t tmp;
	int rv;

	/* Build the ioctl and send it. */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_SETMEMORYMAILBOX;

	/* Set buffer addresses. */
	tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_command);
	mci.param.setmemorymailbox.command_mailbox_physaddr = htole64(tmp);

	tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_status);
	mci.param.setmemorymailbox.status_mailbox_physaddr = htole64(tmp);

	tmp = mly->mly_mmbox_busaddr + offsetof(struct mly_mmbox, mmm_health);
	mci.param.setmemorymailbox.health_buffer_physaddr = htole64(tmp);

	/* Set buffer sizes - abuse of data_size field is revolting. */
	sp = (u_int8_t *)&mci.data_size;
	sp[0] = (sizeof(union mly_cmd_packet) * MLY_MMBOX_COMMANDS) >> 10;
	sp[1] = (sizeof(union mly_status_packet) * MLY_MMBOX_STATUS) >> 10;
	mci.param.setmemorymailbox.health_buffer_size =
	    sizeof(union mly_health_region) >> 10;
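	/*
	 * Each size above is expressed in units of 1kB (hence the shift
	 * by 10); the command and status mailbox sizes are packed into
	 * the first two bytes of the otherwise-unused data_size field.
	 */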

	rv = mly_ioctl(mly, &mci, NULL, 0, NULL, NULL);
	if (rv)
		return (rv);

	mly->mly_state |= MLY_STATE_MMBOX_ACTIVE;
	return (0);
}

/*
 * Flush all pending I/O from the controller.
 */
static int
mly_flush(struct mly_softc *mly)
{
	struct mly_cmd_ioctl mci;

	/* Build the ioctl */
	memset(&mci, 0, sizeof(mci));
	mci.sub_ioctl = MDACIOCTL_FLUSHDEVICEDATA;
	mci.param.deviceoperation.operation_device =
	    MLY_OPDEVICE_PHYSICAL_CONTROLLER;

	/* Pass it off to the controller */
	return (mly_ioctl(mly, &mci, NULL, 0, NULL, NULL));
}

/*
 * Perform an ioctl command.
 *
 * If (data) is not NULL, the command requires data transfer to the
 * controller.  If (*data) is NULL the command requires data transfer from
 * the controller, and we will allocate a buffer for it.
 */
static int
mly_ioctl(struct mly_softc *mly, struct mly_cmd_ioctl *ioctl, void **data,
	  size_t datasize, void *sense_buffer,
	  size_t *sense_length)
{
	struct mly_ccb *mc;
	struct mly_cmd_ioctl *mci;
	u_int8_t status;
	int rv;

	mc = NULL;
	if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
		goto bad;

	/*
	 * Copy the ioctl structure, but save some important fields and then
	 * fixup.
	 */
	mci = &mc->mc_packet->ioctl;
	ioctl->sense_buffer_address = htole64(mci->sense_buffer_address);
	ioctl->maximum_sense_size = mci->maximum_sense_size;
	*mci = *ioctl;
	mci->opcode = MDACMD_IOCTL;
	mci->timeout = 30 | MLY_TIMEOUT_SECONDS;

	/* Handle the data buffer. */
	if (data != NULL) {
		if (*data == NULL) {
			/* Allocate data buffer */
			mc->mc_data = malloc(datasize, M_DEVBUF, M_NOWAIT);
			if (mc->mc_data == NULL) {
				rv = ENOMEM;
				goto bad;
			}
			mc->mc_flags |= MLY_CCB_DATAIN;
		} else {
			mc->mc_data = *data;
			mc->mc_flags |= MLY_CCB_DATAOUT;
		}
		mc->mc_length = datasize;
		mc->mc_packet->generic.data_size = htole32(datasize);
	}

	/* Run the command. */
	if (datasize > 0)
		if ((rv = mly_ccb_map(mly, mc)) != 0)
			goto bad;
	rv = mly_ccb_poll(mly, mc, 30000);
	if (datasize > 0)
		mly_ccb_unmap(mly, mc);
	if (rv != 0)
		goto bad;

	/* Clean up and return any data. */
	status = mc->mc_status;

	if (status != 0)
		printf("mly_ioctl: command status %d\n", status);

	if (mc->mc_sense > 0 && sense_buffer != NULL) {
		memcpy(sense_buffer, mc->mc_packet, mc->mc_sense);
		*sense_length = mc->mc_sense;
		goto bad;
	}

	/* Should we return a data pointer? */
	if (data != NULL && *data == NULL)
		*data = mc->mc_data;

	/* Command completed OK. */
	rv = (status != 0 ? EIO : 0);

 bad:
	if (mc != NULL) {
		/* Do we need to free a data buffer we allocated? */
		if (rv != 0 && mc->mc_data != NULL &&
		    (data == NULL || *data == NULL))
			free(mc->mc_data, M_DEVBUF);
		mly_ccb_free(mly, mc);
	}

	return (rv);
}

/*
 * Check for event(s) outstanding in the controller.
 */
static void
mly_check_event(struct mly_softc *mly)
{

	bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
	    offsetof(struct mly_mmbox, mmm_health),
	    sizeof(mly->mly_mmbox->mmm_health),
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

	/*
	 * The controller may have updated the health status information,
	 * so check for it here.  Note that the counters are all in host
	 * memory, so this check is very cheap.  Also note that we depend
	 * on this check being made again when each command completes.
	 */
	if (le32toh(mly->mly_mmbox->mmm_health.status.change_counter) !=
	    mly->mly_event_change) {
		mly->mly_event_change =
		    le32toh(mly->mly_mmbox->mmm_health.status.change_counter);
		mly->mly_event_waiting =
		    le32toh(mly->mly_mmbox->mmm_health.status.next_event);

		/* Wake up anyone that might be interested in this. */
		wakeup(&mly->mly_event_change);
	}

	bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
	    offsetof(struct mly_mmbox, mmm_health),
	    sizeof(mly->mly_mmbox->mmm_health),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	if (mly->mly_event_counter != mly->mly_event_waiting)
		mly_fetch_event(mly);
}

/*
 * Fetch one event from the controller.  If we fail due to resource
 * starvation, we'll be retried the next time a command completes.
 */
static void
mly_fetch_event(struct mly_softc *mly)
{
	struct mly_ccb *mc;
	struct mly_cmd_ioctl *mci;
	int s;
	u_int32_t event;

	/* Get a command. */
	if (mly_ccb_alloc(mly, &mc))
		return;

	/* Set up the data buffer. */
	mc->mc_data = malloc(sizeof(struct mly_event), M_DEVBUF,
	    M_NOWAIT|M_ZERO);
	if (mc->mc_data == NULL) {
		mly_ccb_free(mly, mc);
		return;
	}

	mc->mc_length = sizeof(struct mly_event);
	mc->mc_flags |= MLY_CCB_DATAIN;
	mc->mc_complete = mly_complete_event;

	/*
	 * Get an event number to fetch.  It's possible that we've raced
	 * with another context for the last event, in which case there will
	 * be no more events.
	 */
	s = splbio();
	if (mly->mly_event_counter == mly->mly_event_waiting) {
		splx(s);
		free(mc->mc_data, M_DEVBUF);
		mly_ccb_free(mly, mc);
		return;
	}
	event = mly->mly_event_counter++;
	splx(s);

	/*
	 * Build the ioctl.
	 *
	 * At this point we are committed to sending this request, as it
	 * will be the only one constructed for this particular event
	 * number.
	 */
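	/*
	 * The 32-bit event number is scattered across the request: the
	 * low 16 bits travel in the getevent parameter block, while bits
	 * 16-23 and 24-31 are packed into the target and LUN fields of
	 * the address.
	 */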
	mci = (struct mly_cmd_ioctl *)&mc->mc_packet->ioctl;
	mci->opcode = MDACMD_IOCTL;
	mci->data_size = htole32(sizeof(struct mly_event));
	_lto3l(MLY_PHYADDR(0, 0, (event >> 16) & 0xff, (event >> 24) & 0xff),
	    mci->addr);
	mci->timeout = 30 | MLY_TIMEOUT_SECONDS;
	mci->sub_ioctl = MDACIOCTL_GETEVENT;
	mci->param.getevent.sequence_number_low = htole16(event & 0xffff);

	/*
	 * Submit the command.
	 */
	if (mly_ccb_map(mly, mc) != 0)
		goto bad;
	mly_ccb_enqueue(mly, mc);
	return;

 bad:
	printf("%s: couldn't fetch event %u\n", device_xname(mly->mly_dv),
	    event);
	free(mc->mc_data, M_DEVBUF);
	mly_ccb_free(mly, mc);
}

/*
 * Handle the completion of an event poll.
 */
static void
mly_complete_event(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct mly_event *me;

	me = (struct mly_event *)mc->mc_data;
	mly_ccb_unmap(mly, mc);
	mly_ccb_free(mly, mc);

	/* If the event was successfully fetched, process it. */
	if (mc->mc_status == SCSI_OK)
		mly_process_event(mly, me);
	else
		aprint_error_dev(mly->mly_dv,
		    "unable to fetch event; status = 0x%x\n", mc->mc_status);

	free(me, M_DEVBUF);

	/* Check for another event. */
	mly_check_event(mly);
}

/*
 * Process a controller event.  Called with interrupts blocked (i.e., at
 * interrupt time).
 */
static void
mly_process_event(struct mly_softc *mly, struct mly_event *me)
{
	struct scsi_sense_data *ssd;
	int bus, target, event, class, action;
	const char *fp, *tp;

	ssd = (struct scsi_sense_data *)&me->sense[0];

	/*
	 * Errors can be reported using vendor-unique sense data.  In this
	 * case, the event code will be 0x1c (Request sense data present),
	 * the sense key will be 0x09 (vendor specific), the MSB of the ASC
	 * will be set, and the actual event code will be a 16-bit value
	 * comprised of the ASCQ (low byte) and low seven bits of the ASC
	 * (low seven bits of the high byte).
	 */
	if (le32toh(me->code) == 0x1c &&
	    SSD_SENSE_KEY(ssd->flags) == SKEY_VENDOR_SPECIFIC &&
	    (ssd->asc & 0x80) != 0) {
		event = ((int)(ssd->asc & ~0x80) << 8) +
		    ssd->ascq;
	} else
		event = le32toh(me->code);

	/* Look up event, get codes. */
	fp = mly_describe_code(mly_table_event, event);

	/* Quiet event? */
	class = fp[0];
#ifdef notyet
	if (isupper(class) && bootverbose)
		class = tolower(class);
#endif

	/* Get action code, text string. */
	action = fp[1];
	tp = fp + 3;

	/*
	 * Print some information about the event.
	 *
	 * This code uses a table derived from the corresponding portion of
	 * the Linux driver, and thus the parser is very similar.
	 */
	switch (class) {
	case 'p':
		/*
		 * Error on physical drive.
		 */
		printf("%s: physical device %d:%d %s\n", device_xname(mly->mly_dv),
		    me->channel, me->target, tp);
		if (action == 'r')
			mly->mly_btl[me->channel][me->target].mb_flags |=
			    MLY_BTL_RESCAN;
		break;

	case 'l':
	case 'm':
		/*
		 * Error on logical unit, or message about logical unit.
		 */
		bus = MLY_LOGDEV_BUS(mly, me->lun);
		target = MLY_LOGDEV_TARGET(mly, me->lun);
		printf("%s: logical device %d:%d %s\n", device_xname(mly->mly_dv),
		    bus, target, tp);
		if (action == 'r')
			mly->mly_btl[bus][target].mb_flags |= MLY_BTL_RESCAN;
		break;

	case 's':
		/*
		 * Report of sense data.
		 */
		if ((SSD_SENSE_KEY(ssd->flags) == SKEY_NO_SENSE ||
		     SSD_SENSE_KEY(ssd->flags) == SKEY_NOT_READY) &&
		    ssd->asc == 0x04 &&
		    (ssd->ascq == 0x01 ||
		     ssd->ascq == 0x02)) {
			/* Ignore NO_SENSE or NOT_READY in one case */
			break;
		}

		/*
		 * XXX Should translate this if SCSIVERBOSE.
		 */
		printf("%s: physical device %d:%d %s\n", device_xname(mly->mly_dv),
		    me->channel, me->target, tp);
		printf("%s:  sense key %d  asc %02x  ascq %02x\n",
		    device_xname(mly->mly_dv), SSD_SENSE_KEY(ssd->flags),
		    ssd->asc, ssd->ascq);
		printf("%s:  info %x%x%x%x  csi %x%x%x%x\n",
		    device_xname(mly->mly_dv), ssd->info[0], ssd->info[1],
		    ssd->info[2], ssd->info[3], ssd->csi[0],
		    ssd->csi[1], ssd->csi[2],
		    ssd->csi[3]);
		if (action == 'r')
			mly->mly_btl[me->channel][me->target].mb_flags |=
			    MLY_BTL_RESCAN;
		break;

	case 'e':
		printf("%s: ", device_xname(mly->mly_dv));
		printf(tp, me->target, me->lun);
		break;

	case 'c':
		printf("%s: controller %s\n", device_xname(mly->mly_dv), tp);
		break;

	case '?':
		printf("%s: %s - %d\n", device_xname(mly->mly_dv), tp, event);
		break;

	default:
		/* Probably a 'noisy' event being ignored. */
		break;
	}
}

/*
 * Perform periodic activities.
 */
static void
mly_thread(void *cookie)
{
	struct mly_softc *mly;
	struct mly_btl *btl;
	int s, bus, target, done;

	mly = (struct mly_softc *)cookie;

	for (;;) {
		/* Check for new events. */
		mly_check_event(mly);

		/* Re-scan up to 1 device. */
		s = splbio();
		done = 0;
		for (bus = 0; bus < mly->mly_nchans && !done; bus++) {
			for (target = 0; target < MLY_MAX_TARGETS; target++) {
				/* Perform device rescan? */
				btl = &mly->mly_btl[bus][target];
				if ((btl->mb_flags & MLY_BTL_RESCAN) != 0) {
					btl->mb_flags ^= MLY_BTL_RESCAN;
					mly_scan_btl(mly, bus, target);
					done = 1;
					break;
				}
			}
		}
		splx(s);

		/* Sleep for N seconds. */
		tsleep(mly_thread, PWAIT, "mlyzzz",
		    hz * MLY_PERIODIC_INTERVAL);
	}
}

/*
 * Submit a command to the controller and poll on completion.  Return
 * non-zero on timeout.
 */
static int
mly_ccb_poll(struct mly_softc *mly, struct mly_ccb *mc, int timo)
{
	int rv;

	if ((rv = mly_ccb_submit(mly, mc)) != 0)
		return (rv);

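	/*
	 * The timeout is given in milliseconds; we poll at roughly 100us
	 * intervals, hence the factor of ten.
	 */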
	for (timo *= 10; timo != 0; timo--) {
		if ((mc->mc_flags & MLY_CCB_COMPLETE) != 0)
			break;
		mly_intr(mly);
		DELAY(100);
	}

	return (timo == 0);
}

/*
 * Submit a command to the controller and sleep on completion.  Return
 * non-zero on timeout.
 */
static int
mly_ccb_wait(struct mly_softc *mly, struct mly_ccb *mc, int timo)
{
	int rv, s;

	mly_ccb_enqueue(mly, mc);

	s = splbio();
	if ((mc->mc_flags & MLY_CCB_COMPLETE) != 0) {
		splx(s);
		return (0);
	}
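	/* The timeout is in milliseconds; convert it to clock ticks. */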
	rv = tsleep(mc, PRIBIO, "mlywccb", timo * hz / 1000);
	splx(s);

	return (rv);
}

/*
 * If a CCB is specified, enqueue it.  Pull CCBs off the software queue in
 * the order that they were enqueued and try to submit their command blocks
 * to the controller for execution.
 */
void
mly_ccb_enqueue(struct mly_softc *mly, struct mly_ccb *mc)
{
	int s;

	s = splbio();

	if (mc != NULL)
		SIMPLEQ_INSERT_TAIL(&mly->mly_ccb_queue, mc, mc_link.simpleq);

	while ((mc = SIMPLEQ_FIRST(&mly->mly_ccb_queue)) != NULL) {
		if (mly_ccb_submit(mly, mc))
			break;
		SIMPLEQ_REMOVE_HEAD(&mly->mly_ccb_queue, mc_link.simpleq);
	}

	splx(s);
}

/*
 * Deliver a command to the controller.
 */
static int
mly_ccb_submit(struct mly_softc *mly, struct mly_ccb *mc)
{
	union mly_cmd_packet *pkt;
	int s, off;

	mc->mc_packet->generic.command_id = htole16(mc->mc_slot);

	bus_dmamap_sync(mly->mly_dmat, mly->mly_pkt_dmamap,
	    mc->mc_packetphys - mly->mly_pkt_busaddr,
	    sizeof(union mly_cmd_packet),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	s = splbio();

	/*
	 * Do we have to use the hardware mailbox?
	 */
	if ((mly->mly_state & MLY_STATE_MMBOX_ACTIVE) == 0) {
		/*
		 * Check to see if the controller is ready for us.
		 */
		if (mly_idbr_true(mly, MLY_HM_CMDSENT)) {
			splx(s);
			return (EBUSY);
		}

		/*
		 * It's ready, send the command.
		 */
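		/*
		 * The 64-bit packet address is written as two 32-bit
		 * halves, low word first.
		 */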
		mly_outl(mly, mly->mly_cmd_mailbox,
		    (u_int64_t)mc->mc_packetphys & 0xffffffff);
		mly_outl(mly, mly->mly_cmd_mailbox + 4,
		    (u_int64_t)mc->mc_packetphys >> 32);
		mly_outb(mly, mly->mly_idbr, MLY_HM_CMDSENT);
	} else {
		pkt = &mly->mly_mmbox->mmm_command[mly->mly_mmbox_cmd_idx];
		off = (char *)pkt - (char *)mly->mly_mmbox;

		bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
		    off, sizeof(mly->mly_mmbox->mmm_command[0]),
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

		/* Check to see if the next index is free yet. */
		if (pkt->mmbox.flag != 0) {
			splx(s);
			return (EBUSY);
		}

		/* Copy in new command */
		memcpy(pkt->mmbox.data, mc->mc_packet->mmbox.data,
		    sizeof(pkt->mmbox.data));

		/*
		 * Copy the flag byte last: the controller treats a
		 * non-zero flag as "command present", so the payload
		 * must be in place before the flag is set.
		 */
		pkt->mmbox.flag = mc->mc_packet->mmbox.flag;

		bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
		    off, sizeof(mly->mly_mmbox->mmm_command[0]),
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		/* Signal controller and update index. */
		mly_outb(mly, mly->mly_idbr, MLY_AM_CMDSENT);
		mly->mly_mmbox_cmd_idx =
		    (mly->mly_mmbox_cmd_idx + 1) % MLY_MMBOX_COMMANDS;
	}

	splx(s);
	return (0);
}

/*
 * Pick up completed commands from the controller and handle accordingly.
 */
int
mly_intr(void *cookie)
{
	struct mly_ccb *mc;
	union mly_status_packet	*sp;
	u_int16_t slot;
	int forus, off;
	struct mly_softc *mly;

	mly = cookie;
	forus = 0;

	/*
	 * Pick up hardware-mailbox commands.
	 */
	if (mly_odbr_true(mly, MLY_HM_STSREADY)) {
		slot = mly_inw(mly, mly->mly_status_mailbox);

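		/*
		 * The status mailbox holds the 16-bit command ID,
		 * one-byte status and sense-length fields, and a 32-bit
		 * residue, in that order.
		 */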
		if (slot < MLY_SLOT_MAX) {
			mc = mly->mly_ccbs + (slot - MLY_SLOT_START);
			mc->mc_status =
			    mly_inb(mly, mly->mly_status_mailbox + 2);
			mc->mc_sense =
			    mly_inb(mly, mly->mly_status_mailbox + 3);
			mc->mc_resid =
			    mly_inl(mly, mly->mly_status_mailbox + 4);

			mly_ccb_complete(mly, mc);
		} else {
			/* Slot 0xffff may mean "extremely bogus command". */
			printf("%s: got HM completion for illegal slot %u\n",
			    device_xname(mly->mly_dv), slot);
		}

		/* Unconditionally acknowledge status. */
		mly_outb(mly, mly->mly_odbr, MLY_HM_STSREADY);
		mly_outb(mly, mly->mly_idbr, MLY_HM_STSACK);
		forus = 1;
	}

	/*
	 * Pick up memory-mailbox commands.
	 */
	if (mly_odbr_true(mly, MLY_AM_STSREADY)) {
		for (;;) {
			sp = &mly->mly_mmbox->mmm_status[mly->mly_mmbox_sts_idx];
			off = (char *)sp - (char *)mly->mly_mmbox;

			bus_dmamap_sync(mly->mly_dmat, mly->mly_mmbox_dmamap,
			    off, sizeof(mly->mly_mmbox->mmm_command[0]),
			    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

			/* Check for more status. */
			if (sp->mmbox.flag == 0)
				break;

			/* Get slot number. */
			slot = le16toh(sp->status.command_id);
			if (slot < MLY_SLOT_MAX) {
				mc = mly->mly_ccbs + (slot - MLY_SLOT_START);
				mc->mc_status = sp->status.status;
				mc->mc_sense = sp->status.sense_length;
				mc->mc_resid = le32toh(sp->status.residue);
				mly_ccb_complete(mly, mc);
			} else {
				/*
				 * Slot 0xffff may mean "extremely bogus
				 * command".
				 */
				printf("%s: got AM completion for illegal "
				    "slot %u at %d\n", device_xname(mly->mly_dv),
				    slot, mly->mly_mmbox_sts_idx);
			}

			/* Clear and move to next index. */
			sp->mmbox.flag = 0;
			mly->mly_mmbox_sts_idx =
			    (mly->mly_mmbox_sts_idx + 1) % MLY_MMBOX_STATUS;
		}

		/* Acknowledge that we have collected status value(s). */
		mly_outb(mly, mly->mly_odbr, MLY_AM_STSREADY);
		forus = 1;
	}

	/*
	 * Run the queue.
	 */
	if (forus && !SIMPLEQ_EMPTY(&mly->mly_ccb_queue))
		mly_ccb_enqueue(mly, NULL);

	return (forus);
}

/*
 * Process completed commands.
 */
static void
mly_ccb_complete(struct mly_softc *mly, struct mly_ccb *mc)
{
	void (*complete)(struct mly_softc *, struct mly_ccb *);

	bus_dmamap_sync(mly->mly_dmat, mly->mly_pkt_dmamap,
	    mc->mc_packetphys - mly->mly_pkt_busaddr,
	    sizeof(union mly_cmd_packet),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	complete = mc->mc_complete;
	mc->mc_flags |= MLY_CCB_COMPLETE;

	/*
	 * Call completion handler or wake up sleeping consumer.
	 */
	if (complete != NULL)
		(*complete)(mly, mc);
	else
		wakeup(mc);
}

/*
 * Allocate a command.
 */
int
mly_ccb_alloc(struct mly_softc *mly, struct mly_ccb **mcp)
{
	struct mly_ccb *mc;
	int s;

	s = splbio();
	mc = SLIST_FIRST(&mly->mly_ccb_free);
	if (mc != NULL)
		SLIST_REMOVE_HEAD(&mly->mly_ccb_free, mc_link.slist);
	splx(s);

	*mcp = mc;
	return (mc == NULL ? EAGAIN : 0);
}

/*
 * Release a command back to the freelist.
 */
void
mly_ccb_free(struct mly_softc *mly, struct mly_ccb *mc)
{
	int s;

	/*
	 * Reset the parts of the command that could confuse the next
	 * consumer if it doesn't initialise them when this CCB is
	 * allocated again.
	 */
	mc->mc_data = NULL;
	mc->mc_flags = 0;
	mc->mc_complete = NULL;
	mc->mc_private = NULL;
	mc->mc_packet->generic.command_control = 0;

	/*
	 * By default, we set up to overwrite the command packet with sense
	 * information.
	 */
	mc->mc_packet->generic.sense_buffer_address =
	    htole64(mc->mc_packetphys);
	mc->mc_packet->generic.maximum_sense_size =
	    sizeof(union mly_cmd_packet);

	s = splbio();
	SLIST_INSERT_HEAD(&mly->mly_ccb_free, mc, mc_link.slist);
	splx(s);
}

/*
 * Allocate and initialize command and packet structures.
 *
 * If the controller supports fewer than MLY_MAX_CCBS commands, limit our
 * allocation to that number.  If we don't yet know how many commands the
 * controller supports, allocate a very small set (suitable for initialization
 * purposes only).
 */
static int
mly_alloc_ccbs(struct mly_softc *mly)
{
	struct mly_ccb *mc;
	int i, rv;

	if (mly->mly_controllerinfo == NULL)
		mly->mly_ncmds = MLY_CCBS_RESV;
	else {
		i = le16toh(mly->mly_controllerinfo->maximum_parallel_commands);
		mly->mly_ncmds = min(MLY_MAX_CCBS, i);
	}

	/*
	 * Allocate enough space for all the command packets in one chunk
	 * and map them permanently into controller-visible space.
	 */
	rv = mly_dmamem_alloc(mly,
	    mly->mly_ncmds * sizeof(union mly_cmd_packet),
	    &mly->mly_pkt_dmamap, (void **)&mly->mly_pkt,
	    &mly->mly_pkt_busaddr, &mly->mly_pkt_seg);
	if (rv)
		return (rv);

	mly->mly_ccbs = malloc(sizeof(struct mly_ccb) * mly->mly_ncmds,
	    M_DEVBUF, M_NOWAIT|M_ZERO);
	if (mly->mly_ccbs == NULL) {
		mly_dmamem_free(mly,
		    mly->mly_ncmds * sizeof(union mly_cmd_packet),
		    mly->mly_pkt_dmamap, (void *)mly->mly_pkt,
		    &mly->mly_pkt_seg);
		return (ENOMEM);
	}

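	/*
	 * Carve the packet area into per-CCB command packets.  Slots are
	 * numbered from MLY_SLOT_START so that they match the command_id
	 * space used by the controller.
	 */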
	for (i = 0; i < mly->mly_ncmds; i++) {
		mc = mly->mly_ccbs + i;
		mc->mc_slot = MLY_SLOT_START + i;
		mc->mc_packet = mly->mly_pkt + i;
		mc->mc_packetphys = mly->mly_pkt_busaddr +
		    (i * sizeof(union mly_cmd_packet));

		rv = bus_dmamap_create(mly->mly_dmat, MLY_MAX_XFER,
		    MLY_MAX_SEGS, MLY_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &mc->mc_datamap);
		if (rv) {
			mly_release_ccbs(mly);
			return (rv);
		}

		mly_ccb_free(mly, mc);
	}

	return (0);
}

/*
 * Free all the storage held by commands.
 *
 * Must be called with all commands on the free list.
 */
static void
mly_release_ccbs(struct mly_softc *mly)
{
	struct mly_ccb *mc;

	/* Throw away command buffer DMA maps. */
	while (mly_ccb_alloc(mly, &mc) == 0)
		bus_dmamap_destroy(mly->mly_dmat, mc->mc_datamap);

	/* Release CCB storage. */
	free(mly->mly_ccbs, M_DEVBUF);

	/* Release the packet storage. */
	mly_dmamem_free(mly, mly->mly_ncmds * sizeof(union mly_cmd_packet),
	    mly->mly_pkt_dmamap, (void *)mly->mly_pkt, &mly->mly_pkt_seg);
}

/*
 * Map a command into controller-visible space.
 */
static int
mly_ccb_map(struct mly_softc *mly, struct mly_ccb *mc)
{
	struct mly_cmd_generic *gen;
	struct mly_sg_entry *sg;
	bus_dma_segment_t *ds;
	int flg, nseg, rv;

#ifdef DIAGNOSTIC
	/* Don't map more than once. */
	if ((mc->mc_flags & MLY_CCB_MAPPED) != 0)
		panic("mly_ccb_map: already mapped");
	mc->mc_flags |= MLY_CCB_MAPPED;

	/* Does the command have a data buffer? */
	if (mc->mc_data == NULL)
		panic("mly_ccb_map: no data buffer");
#endif

	rv = bus_dmamap_load(mly->mly_dmat, mc->mc_datamap, mc->mc_data,
	    mc->mc_length, NULL, BUS_DMA_NOWAIT | BUS_DMA_STREAMING |
	    ((mc->mc_flags & MLY_CCB_DATAIN) != 0 ?
	    BUS_DMA_READ : BUS_DMA_WRITE));
	if (rv != 0)
		return (rv);

	gen = &mc->mc_packet->generic;

	/*
	 * Can we use the transfer structure directly?
	 */
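	/*
	 * Up to two segments fit in the command packet itself; anything
	 * larger spills into this CCB's slice of the shared S/G area and
	 * is referenced indirectly.
	 */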
	if ((nseg = mc->mc_datamap->dm_nsegs) <= 2) {
		mc->mc_sgoff = -1;
		sg = &gen->transfer.direct.sg[0];
	} else {
		mc->mc_sgoff = (mc->mc_slot - MLY_SLOT_START) *
		    MLY_MAX_SEGS;
		sg = mly->mly_sg + mc->mc_sgoff;
		gen->command_control |= MLY_CMDCTL_EXTENDED_SG_TABLE;
		gen->transfer.indirect.entries[0] = htole16(nseg);
		gen->transfer.indirect.table_physaddr[0] =
		    htole64(mly->mly_sg_busaddr +
		    (mc->mc_sgoff * sizeof(struct mly_sg_entry)));
	}

	/*
	 * Fill the S/G table.
	 */
	for (ds = mc->mc_datamap->dm_segs; nseg != 0; nseg--, sg++, ds++) {
		sg->physaddr = htole64(ds->ds_addr);
		sg->length = htole64(ds->ds_len);
	}

	/*
	 * Sync up the data map.
	 */
	if ((mc->mc_flags & MLY_CCB_DATAIN) != 0)
		flg = BUS_DMASYNC_PREREAD;
	else /* if ((mc->mc_flags & MLY_CCB_DATAOUT) != 0) */ {
		gen->command_control |= MLY_CMDCTL_DATA_DIRECTION;
		flg = BUS_DMASYNC_PREWRITE;
	}

	bus_dmamap_sync(mly->mly_dmat, mc->mc_datamap, 0, mc->mc_length, flg);

	/*
	 * Sync up the chained S/G table, if we're using one.
	 */
	if (mc->mc_sgoff == -1)
		return (0);

	bus_dmamap_sync(mly->mly_dmat, mly->mly_sg_dmamap, mc->mc_sgoff,
	    MLY_SGL_SIZE, BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Unmap a command from controller-visible space.
 */
static void
mly_ccb_unmap(struct mly_softc *mly, struct mly_ccb *mc)
{
	int flg;

#ifdef DIAGNOSTIC
	if ((mc->mc_flags & MLY_CCB_MAPPED) == 0)
		panic("mly_ccb_unmap: not mapped");
	mc->mc_flags &= ~MLY_CCB_MAPPED;
#endif

	if ((mc->mc_flags & MLY_CCB_DATAIN) != 0)
		flg = BUS_DMASYNC_POSTREAD;
	else /* if ((mc->mc_flags & MLY_CCB_DATAOUT) != 0) */
		flg = BUS_DMASYNC_POSTWRITE;

	bus_dmamap_sync(mly->mly_dmat, mc->mc_datamap, 0, mc->mc_length, flg);
	bus_dmamap_unload(mly->mly_dmat, mc->mc_datamap);

	if (mc->mc_sgoff == -1)
		return;

	bus_dmamap_sync(mly->mly_dmat, mly->mly_sg_dmamap, mc->mc_sgoff,
	    MLY_SGL_SIZE, BUS_DMASYNC_POSTWRITE);
}

/*
 * Adjust the size of each I/O before it passes to the SCSI layer.
 */
static void
mly_scsipi_minphys(struct buf *bp)
{

	if (bp->b_bcount > MLY_MAX_XFER)
		bp->b_bcount = MLY_MAX_XFER;
	minphys(bp);
}

/*
 * Start a SCSI command.
 */
static void
mly_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
		   void *arg)
{
	struct mly_ccb *mc;
	struct mly_cmd_scsi_small *ss;
	struct scsipi_xfer *xs;
	struct scsipi_periph *periph;
	struct mly_softc *mly;
	struct mly_btl *btl;
	int s, tmp;

	mly = device_private(chan->chan_adapter->adapt_dev);

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		btl = &mly->mly_btl[chan->chan_channel][periph->periph_target];
		s = splbio();
		tmp = btl->mb_flags;
		splx(s);

1827 		/*
1828 		 * Check for I/O attempt to a protected or non-existent
1829 		 * device.
1830 		 */
1831 		if ((tmp & MLY_BTL_PROTECTED) != 0) {
1832 			xs->error = XS_SELTIMEOUT;
1833 			scsipi_done(xs);
1834 			break;
1835 		}
1836 
1837 #ifdef DIAGNOSTIC
1838 		/* XXX Increase if/when we support large SCSI commands. */
1839 		if (xs->cmdlen > MLY_CMD_SCSI_SMALL_CDB) {
1840 			printf("%s: cmd too large\n", device_xname(mly->mly_dv));
1841 			xs->error = XS_DRIVER_STUFFUP;
1842 			scsipi_done(xs);
1843 			break;
1844 		}
1845 #endif
1846 
1847 		if (mly_ccb_alloc(mly, &mc)) {
1848 			xs->error = XS_RESOURCE_SHORTAGE;
1849 			scsipi_done(xs);
1850 			break;
1851 		}
1852 
1853 		/* Build the command. */
1854 		mc->mc_data = xs->data;
1855 		mc->mc_length = xs->datalen;
1856 		mc->mc_complete = mly_scsipi_complete;
1857 		mc->mc_private = xs;
1858 
1859 		/* Build the packet for the controller. */
1860 		ss = &mc->mc_packet->scsi_small;
1861 		ss->opcode = MDACMD_SCSI;
1862 #ifdef notdef
1863 		/*
1864 		 * XXX FreeBSD does this, but it doesn't fix anything,
1865 		 * XXX and appears potentially harmful.
1866 		 */
1867 		ss->command_control |= MLY_CMDCTL_DISABLE_DISCONNECT;
1868 #endif
1869 
1870 		ss->data_size = htole32(xs->datalen);
1871 		_lto3l(MLY_PHYADDR(0, chan->chan_channel,
1872 		    periph->periph_target, periph->periph_lun), ss->addr);
1873 
1874 		if (xs->timeout < 60 * 1000)
1875 			ss->timeout = xs->timeout / 1000 |
1876 			    MLY_TIMEOUT_SECONDS;
1877 		else if (xs->timeout < 60 * 60 * 1000)
1878 			ss->timeout = xs->timeout / (60 * 1000) |
1879 			    MLY_TIMEOUT_MINUTES;
1880 		else
1881 			ss->timeout = xs->timeout / (60 * 60 * 1000) |
1882 			    MLY_TIMEOUT_HOURS;
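		/*
		 * Worked examples of the encoding above: a 30000ms
		 * timeout becomes (30 | MLY_TIMEOUT_SECONDS), while
		 * 120000ms becomes (2 | MLY_TIMEOUT_MINUTES).  The
		 * scale flag and the quotient share the one timeout
		 * field, which is why each branch keeps the quotient
		 * small.
		 */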
1883 
1884 		ss->maximum_sense_size = sizeof(xs->sense);
1885 		ss->cdb_length = xs->cmdlen;
1886 		memcpy(ss->cdb, xs->cmd, xs->cmdlen);
1887 
1888 		if (mc->mc_length != 0) {
1889 			if ((xs->xs_control & XS_CTL_DATA_OUT) != 0)
1890 				mc->mc_flags |= MLY_CCB_DATAOUT;
1891 			else /* if ((xs->xs_control & XS_CTL_DATA_IN) != 0) */
1892 				mc->mc_flags |= MLY_CCB_DATAIN;
1893 
1894 			if (mly_ccb_map(mly, mc) != 0) {
1895 				xs->error = XS_DRIVER_STUFFUP;
1896 				mly_ccb_free(mly, mc);
1897 				scsipi_done(xs);
1898 				break;
1899 			}
1900 		}
1901 
1902 		/*
1903 		 * Give the command to the controller.
1904 		 */
1905 		if ((xs->xs_control & XS_CTL_POLL) != 0) {
1906 			if (mly_ccb_poll(mly, mc, xs->timeout + 5000)) {
1907 				xs->error = XS_REQUEUE;
1908 				if (mc->mc_length != 0)
1909 					mly_ccb_unmap(mly, mc);
1910 				mly_ccb_free(mly, mc);
1911 				scsipi_done(xs);
1912 			}
1913 		} else
1914 			mly_ccb_enqueue(mly, mc);
1915 
1916 		break;
1917 
1918 	case ADAPTER_REQ_GROW_RESOURCES:
1919 		/*
1920 		 * Not supported.
1921 		 */
1922 		break;
1923 
1924 	case ADAPTER_REQ_SET_XFER_MODE:
1925 		/*
1926 		 * We can't change the transfer mode, but at least let
1927 		 * scsipi know what the adapter has negotiated.
1928 		 */
1929 		mly_get_xfer_mode(mly, chan->chan_channel, arg);
1930 		break;
1931 	}
1932 }
1933 
1934 /*
1935  * Handle completion of a SCSI command.
1936  */
1937 static void
1938 mly_scsipi_complete(struct mly_softc *mly, struct mly_ccb *mc)
1939 {
1940 	struct scsipi_xfer *xs;
1941 	struct scsipi_channel *chan;
1942 	struct scsipi_inquiry_data *inq;
1943 	struct mly_btl *btl;
1944 	int target, sl, s;
1945 	const char *p;
1946 
1947 	xs = mc->mc_private;
1948 	xs->status = mc->mc_status;
1949 
1950 	/*
1951 	 * XXX The `resid' value as returned by the controller appears to be
1952 	 * bogus, so we always set it to zero.  Is it perhaps the transfer
1953 	 * count?
1954 	 */
1955 	xs->resid = 0; /* mc->mc_resid; */
1956 
1957 	if (mc->mc_length != 0)
1958 		mly_ccb_unmap(mly, mc);
1959 
1960 	switch (mc->mc_status) {
1961 	case SCSI_OK:
1962 		/*
1963 		 * In order to report logical device type and status, we
1964 		 * overwrite the result of the INQUIRY command to logical
1965 		 * devices.
1966 		 */
1967 		if (xs->cmd->opcode == INQUIRY) {
1968 			chan = xs->xs_periph->periph_channel;
1969 			target = xs->xs_periph->periph_target;
1970 			btl = &mly->mly_btl[chan->chan_channel][target];
1971 
1972 			s = splbio();
1973 			if ((btl->mb_flags & MLY_BTL_LOGICAL) != 0) {
1974 				inq = (struct scsipi_inquiry_data *)xs->data;
1975 				mly_padstr(inq->vendor, "MYLEX", 8);
1976 				p = mly_describe_code(mly_table_device_type,
1977 				    btl->mb_type);
1978 				mly_padstr(inq->product, p, 16);
1979 				p = mly_describe_code(mly_table_device_state,
1980 				    btl->mb_state);
1981 				mly_padstr(inq->revision, p, 4);
1982 			}
1983 			splx(s);
1984 		}
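		/*
		 * Example of the effect: a logical device reports
		 * vendor "MYLEX   " (space-padded to 8), the product
		 * field carries the device-type description padded to
		 * 16, and the revision field carries the state
		 * description padded to 4; the exact strings depend on
		 * mly_table_device_type and mly_table_device_state.
		 */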
1985 
1986 		xs->error = XS_NOERROR;
1987 		break;
1988 
1989 	case SCSI_CHECK:
1990 		sl = mc->mc_sense;
1991 		if (sl > sizeof(xs->sense.scsi_sense))
1992 			sl = sizeof(xs->sense.scsi_sense);
1993 		memcpy(&xs->sense.scsi_sense, mc->mc_packet, sl);
1994 		xs->error = XS_SENSE;
1995 		break;
1996 
1997 	case SCSI_BUSY:
1998 	case SCSI_QUEUE_FULL:
1999 		xs->error = XS_BUSY;
2000 		break;
2001 
2002 	default:
2003 		printf("%s: unknown SCSI status 0x%x\n",
2004 		    device_xname(mly->mly_dv), xs->status);
2005 		xs->error = XS_DRIVER_STUFFUP;
2006 		break;
2007 	}
2008 
2009 	mly_ccb_free(mly, mc);
2010 	scsipi_done(xs);
2011 }
2012 
2013 /*
2014  * Notify scsipi about a target's transfer mode.
2015  */
2016 static void
2017 mly_get_xfer_mode(struct mly_softc *mly, int bus, struct scsipi_xfer_mode *xm)
2018 {
2019 	struct mly_btl *btl;
2020 	int s;
2021 
2022 	btl = &mly->mly_btl[bus][xm->xm_target];
2023 	xm->xm_mode = 0;
2024 
2025 	s = splbio();
2026 
2027 	if ((btl->mb_flags & MLY_BTL_PHYSICAL) != 0) {
2028 		if (btl->mb_speed == 0) {
2029 			xm->xm_period = 0;
2030 			xm->xm_offset = 0;
2031 		} else {
2032 			xm->xm_period = 12;			/* XXX */
2033 			xm->xm_offset = 8;			/* XXX */
2034 			xm->xm_mode |= PERIPH_CAP_SYNC;		/* XXX */
2035 		}
2036 
2037 		switch (btl->mb_width) {
2038 		case 32:
2039 			xm->xm_mode |= PERIPH_CAP_WIDE32;
2040 			break;
2041 		case 16:
2042 			xm->xm_mode |= PERIPH_CAP_WIDE16;
2043 			break;
2044 		default:
2045 			/* unknown width; leave xm_mode as set above */
2046 			break;
2047 		}
2048 	} else /* ((btl->mb_flags & MLY_BTL_LOGICAL) != 0) */ {
2049 		xm->xm_mode = PERIPH_CAP_WIDE16 | PERIPH_CAP_SYNC;
2050 		xm->xm_period = 12;
2051 		xm->xm_offset = 8;
2052 	}
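	/*
	 * The 12/8 values above are fixed placeholders rather than
	 * parameters negotiated with the device (hence the XXX tags):
	 * a sync period factor of 12 conventionally corresponds to
	 * 50ns (20 MT/s), and 8 is a typical REQ/ACK offset.
	 */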
2053 
2054 	if ((btl->mb_flags & MLY_BTL_TQING) != 0)
2055 		xm->xm_mode |= PERIPH_CAP_TQING;
2056 
2057 	splx(s);
2058 
2059 	scsipi_async_event(&mly->mly_chans[bus], ASYNC_EVENT_XFER_MODE, xm);
2060 }
2061 
2062 /*
2063  * ioctl hook; used here only to initiate low-level rescans.
2064  */
2065 static int
2066 mly_scsipi_ioctl(struct scsipi_channel *chan, u_long cmd, void *data,
2067     int flag, struct proc *p)
2068 {
2069 	struct mly_softc *mly;
2070 	int rv;
2071 
2072 	mly = device_private(chan->chan_adapter->adapt_dev);
2073 
2074 	switch (cmd) {
2075 	case SCBUSIOLLSCAN:
2076 		mly_scan_channel(mly, chan->chan_channel);
2077 		rv = 0;
2078 		break;
2079 	default:
2080 		rv = ENOTTY;
2081 		break;
2082 	}
2083 
2084 	return (rv);
2085 }
2086 
2087 /*
2088  * Handshake with the firmware while the card is being initialized.
2089  */
2090 static int
2091 mly_fwhandshake(struct mly_softc *mly)
2092 {
2093 	u_int8_t error;
2094 	int spinup;
2095 
2096 	spinup = 0;
2097 
2098 	/* Set HM_STSACK and let the firmware initialize. */
2099 	mly_outb(mly, mly->mly_idbr, MLY_HM_STSACK);
2100 	DELAY(1000);	/* too short? */
2101 
2102 	/* If HM_STSACK is still true, the controller is initializing. */
2103 	if (!mly_idbr_true(mly, MLY_HM_STSACK))
2104 		return (0);
2105 
2106 	printf("%s: controller initialization started\n",
2107 	    device_xname(mly->mly_dv));
2108 
2109 	/*
2110 	 * Spin waiting for initialization to finish, or for a message to be
2111 	 * delivered.
2112 	 */
2113 	while (mly_idbr_true(mly, MLY_HM_STSACK)) {
2114 		/* Check for a message */
2115 		if (!mly_error_valid(mly))
2116 			continue;
2117 
2118 		error = mly_inb(mly, mly->mly_error_status) & ~MLY_MSG_EMPTY;
2119 		(void)mly_inb(mly, mly->mly_cmd_mailbox);
2120 		(void)mly_inb(mly, mly->mly_cmd_mailbox + 1);
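		/*
		 * The two discarded reads above appear to drain the
		 * message bytes that accompany the status code, keeping
		 * the mailbox in step with the firmware; only the
		 * masked status byte is decoded below.
		 */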
2121 
2122 		switch (error) {
2123 		case MLY_MSG_SPINUP:
2124 			if (!spinup) {
2125 				printf("%s: drive spinup in progress\n",
2126 				    device_xname(mly->mly_dv));
2127 				spinup = 1;
2128 			}
2129 			break;
2130 
2131 		case MLY_MSG_RACE_RECOVERY_FAIL:
2132 			printf("%s: mirror race recovery failed:\n",
2133 			    device_xname(mly->mly_dv));
2134 			printf("%s: one or more drives offline\n",
2135 			    device_xname(mly->mly_dv));
2136 			break;
2137 
2138 		case MLY_MSG_RACE_IN_PROGRESS:
2139 			printf("%s: mirror race recovery in progress\n",
2140 			    device_xname(mly->mly_dv));
2141 			break;
2142 
2143 		case MLY_MSG_RACE_ON_CRITICAL:
2144 			printf("%s: mirror race recovery on critical drive\n",
2145 			    device_xname(mly->mly_dv));
2146 			break;
2147 
2148 		case MLY_MSG_PARITY_ERROR:
2149 			printf("%s: FATAL MEMORY PARITY ERROR\n",
2150 			    device_xname(mly->mly_dv));
2151 			return (ENXIO);
2152 
2153 		default:
2154 			printf("%s: unknown initialization code 0x%x\n",
2155 			    device_xname(mly->mly_dv), error);
2156 			break;
2157 		}
2158 	}
2159 
2160 	return (0);
2161 }
2162 
2163 /*
2164  * Space-fill a character string
2165  */
2166 static void
2167 mly_padstr(char *dst, const char *src, int len)
2168 {
2169 
2170 	while (len-- > 0) {
2171 		if (*src != '\0')
2172 			*dst++ = *src++;
2173 		else
2174 			*dst++ = ' ';
2175 	}
2176 }
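/*
 * Example: mly_padstr(inq->vendor, "MYLEX", 8) stores exactly
 * "MYLEX   ".  The destination is space-padded to precisely `len'
 * bytes and never NUL-terminated, matching the fixed-width string
 * fields in SCSI inquiry data.
 */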
2177 
2178 /*
2179  * Allocate DMA safe memory.
2180  */
2181 static int
2182 mly_dmamem_alloc(struct mly_softc *mly, int size, bus_dmamap_t *dmamap,
2183 		 void **kva, bus_addr_t *paddr, bus_dma_segment_t *seg)
2184 {
2185 	int rseg, rv, state;
2186 
2187 	state = 0;
2188 
2189 	if ((rv = bus_dmamem_alloc(mly->mly_dmat, size, PAGE_SIZE, 0,
2190 	    seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
2191 		aprint_error_dev(mly->mly_dv, "dmamem_alloc = %d\n", rv);
2192 		goto bad;
2193 	}
2194 
2195 	state++;
2196 
2197 	if ((rv = bus_dmamem_map(mly->mly_dmat, seg, 1, size, kva,
2198 	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
2199 		aprint_error_dev(mly->mly_dv, "dmamem_map = %d\n", rv);
2200 		goto bad;
2201 	}
2202 
2203 	state++;
2204 
2205 	if ((rv = bus_dmamap_create(mly->mly_dmat, size, 1, size, 0,
2206 	    BUS_DMA_NOWAIT, dmamap)) != 0) {
2207 		aprint_error_dev(mly->mly_dv, "dmamap_create = %d\n", rv);
2208 		goto bad;
2209 	}
2210 
2211 	state++;
2212 
2213 	if ((rv = bus_dmamap_load(mly->mly_dmat, *dmamap, *kva, size,
2214 	    NULL, BUS_DMA_NOWAIT)) != 0) {
2215 		aprint_error_dev(mly->mly_dv, "dmamap_load = %d\n", rv);
2216 		goto bad;
2217 	}
2218 
2219 	*paddr = (*dmamap)->dm_segs[0].ds_addr;
2220 	memset(*kva, 0, size);
2221 	return (0);
2222 
2223  bad:
2224 	if (state > 2)
2225 		bus_dmamap_destroy(mly->mly_dmat, *dmamap);
2226 	if (state > 1)
2227 		bus_dmamem_unmap(mly->mly_dmat, *kva, size);
2228 	if (state > 0)
2229 		bus_dmamem_free(mly->mly_dmat, seg, 1);
2230 
2231 	return (rv);
2232 }
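/*
 * The `state' counter above implements the usual staircase unwind for
 * the four bus_dma steps (alloc, map, create, load): each success
 * bumps the counter, and the error path undoes only the steps already
 * completed, in reverse order.  The pattern in isolation (a sketch
 * with hypothetical step functions, not driver code):
 *
 *	state = 0;
 *	if ((rv = step_a()) != 0) goto bad;
 *	state++;
 *	if ((rv = step_b()) != 0) goto bad;
 *	state++;
 *	return (0);
 * bad:
 *	if (state > 1) undo_b();
 *	if (state > 0) undo_a();
 *	return (rv);
 */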
2233 
2234 /*
2235  * Free DMA safe memory.
2236  */
2237 static void
2238 mly_dmamem_free(struct mly_softc *mly, int size, bus_dmamap_t dmamap,
2239 		void *kva, bus_dma_segment_t *seg)
2240 {
2241 
2242 	bus_dmamap_unload(mly->mly_dmat, dmamap);
2243 	bus_dmamap_destroy(mly->mly_dmat, dmamap);
2244 	bus_dmamem_unmap(mly->mly_dmat, kva, size);
2245 	bus_dmamem_free(mly->mly_dmat, seg, 1);
2246 }
2247 
2248 
2249 /*
2250  * Accept an open operation on the control device.
2251  */
2252 int
2253 mlyopen(dev_t dev, int flag, int mode, struct lwp *l)
2254 {
2255 	struct mly_softc *mly;
2256 
2257 	if ((mly = device_lookup_private(&mly_cd, minor(dev))) == NULL)
2258 		return (ENXIO);
2259 	if ((mly->mly_state & MLY_STATE_INITOK) == 0)
2260 		return (ENXIO);
2261 	if ((mly->mly_state & MLY_STATE_OPEN) != 0)
2262 		return (EBUSY);
2263 
2264 	mly->mly_state |= MLY_STATE_OPEN;
2265 	return (0);
2266 }
2267 
2268 /*
2269  * Accept the last close on the control device.
2270  */
2271 int
2272 mlyclose(dev_t dev, int flag, int mode,
2273     struct lwp *l)
2274 {
2275 	struct mly_softc *mly;
2276 
2277 	mly = device_lookup_private(&mly_cd, minor(dev));
2278 	mly->mly_state &= ~MLY_STATE_OPEN;
2279 	return (0);
2280 }
2281 
2282 /*
2283  * Handle control operations.
2284  */
2285 int
2286 mlyioctl(dev_t dev, u_long cmd, void *data, int flag,
2287     struct lwp *l)
2288 {
2289 	struct mly_softc *mly;
2290 	int rv;
2291 
2292 	mly = device_lookup_private(&mly_cd, minor(dev));
2293 
2294 	switch (cmd) {
2295 	case MLYIO_COMMAND:
2296 		rv = kauth_authorize_device_passthru(l->l_cred, dev,
2297 		    KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
2298 		if (rv)
2299 			break;
2300 
2301 		rv = mly_user_command(mly, (void *)data);
2302 		break;
2303 	case MLYIO_HEALTH:
2304 		rv = mly_user_health(mly, (void *)data);
2305 		break;
2306 	default:
2307 		rv = ENOTTY;
2308 		break;
2309 	}
2310 
2311 	return (rv);
2312 }
2313 
2314 /*
2315  * Execute a command passed in from userspace.
2316  *
2317  * The control structure contains the actual command for the controller, as
2318  * well as the user-space data pointer and data size, and an optional sense
2319  * buffer size/pointer.  On completion, the data size is adjusted to the
2320  * command residual, and the sense buffer size to the size of the returned
2321  * sense data.
2322  */
2323 static int
2324 mly_user_command(struct mly_softc *mly, struct mly_user_command *uc)
2325 {
2326 	struct mly_ccb	*mc;
2327 	int rv, mapped;
2328 
2329 	if ((rv = mly_ccb_alloc(mly, &mc)) != 0)
2330 		return (rv);
2331 
2332 	mapped = 0;
2333 	mc->mc_data = NULL;
2334 
2335 	/*
2336 	 * Handle data size/direction.
2337 	 */
2338 	if ((mc->mc_length = abs(uc->DataTransferLength)) != 0) {
2339 		if (mc->mc_length > MAXPHYS) {
2340 			rv = EINVAL;
2341 			goto out;
2342 		}
2343 
2344 		mc->mc_data = malloc(mc->mc_length, M_DEVBUF, M_WAITOK);
2345 		if (mc->mc_data == NULL) {
2346 			rv = ENOMEM;
2347 			goto out;
2348 		}
2349 
2350 		if (uc->DataTransferLength > 0) {
2351 			mc->mc_flags |= MLY_CCB_DATAIN;
2352 			memset(mc->mc_data, 0, mc->mc_length);
2353 		}
2354 
2355 		if (uc->DataTransferLength < 0) {
2356 			mc->mc_flags |= MLY_CCB_DATAOUT;
2357 			rv = copyin(uc->DataTransferBuffer, mc->mc_data,
2358 			    mc->mc_length);
2359 			if (rv != 0)
2360 				goto out;
2361 		}
2362 
2363 		if ((rv = mly_ccb_map(mly, mc)) != 0)
2364 			goto out;
2365 		mapped = 1;
2366 	}
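	/*
	 * To recap the convention above: the sign of DataTransferLength
	 * encodes direction.  Positive means the controller fills the
	 * pre-zeroed buffer (data in); negative means the buffer was
	 * filled by copyin and flows out to the controller; zero means
	 * no data phase at all.
	 */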
2367 
2368 	/* Copy in the command and execute it. */
2369 	memcpy(mc->mc_packet, &uc->CommandMailbox, sizeof(uc->CommandMailbox));
2370 
2371 	if ((rv = mly_ccb_wait(mly, mc, 60000)) != 0)
2372 		goto out;
2373 
2374 	/* Return the data to userspace. */
2375 	if (uc->DataTransferLength > 0) {
2376 		rv = copyout(mc->mc_data, uc->DataTransferBuffer,
2377 		    mc->mc_length);
2378 		if (rv != 0)
2379 			goto out;
2380 	}
2381 
2382 	/* Return the sense buffer to userspace. */
2383 	if (uc->RequestSenseLength > 0 && mc->mc_sense > 0) {
2384 		rv = copyout(mc->mc_packet, uc->RequestSenseBuffer,
2385 		    min(uc->RequestSenseLength, mc->mc_sense));
2386 		if (rv != 0)
2387 			goto out;
2388 	}
2389 
2390 	/* Return command results to userspace (caller will copy out). */
2391 	uc->DataTransferLength = mc->mc_resid;
2392 	uc->RequestSenseLength = min(uc->RequestSenseLength, mc->mc_sense);
2393 	uc->CommandStatus = mc->mc_status;
2394 	rv = 0;
2395 
2396  out:
2397 	if (mapped)
2398 		mly_ccb_unmap(mly, mc);
2399 	if (mc->mc_data != NULL)
2400 		free(mc->mc_data, M_DEVBUF);
2401 	mly_ccb_free(mly, mc);
2402 
2403 	return (rv);
2404 }
2405 
2406 /*
2407  * Return health status to userspace.  If the health change index in the
2408  * user structure does not match that currently exported by the controller,
2409  * we return the current status immediately.  Otherwise, we block until
2410  * either interrupted or new status is delivered.
2411  */
2412 static int
2413 mly_user_health(struct mly_softc *mly, struct mly_user_health *uh)
2414 {
2415 	struct mly_health_status mh;
2416 	int rv, s;
2417 
2418 	/* Fetch the current health status from userspace. */
2419 	rv = copyin(uh->HealthStatusBuffer, &mh, sizeof(mh));
2420 	if (rv != 0)
2421 		return (rv);
2422 
2423 	/* block waiting for a status update */
2424 	s = splbio();
2425 	if (mly->mly_event_change == mh.change_counter)
2426 		rv = tsleep(&mly->mly_event_change, PRIBIO | PCATCH,
2427 		    "mlyhealth", 0);
2428 	splx(s);
2429 
2430 	if (rv == 0) {
2431 		/*
2432 		 * Copy the controller's health status buffer out (there is
2433 		 * a race here if it changes again).
2434 		 */
2435 		rv = copyout(&mly->mly_mmbox->mmm_health.status,
2436 		    uh->HealthStatusBuffer, sizeof(mly->mly_mmbox->mmm_health.status));
2437 	}
2438 
2439 	return (rv);
2440 }
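/*
 * A minimal userland polling sketch (the device node name, descriptor
 * setup and error handling are assumptions, not defined by this
 * driver):
 *
 *	struct mly_health_status hs;
 *	struct mly_user_health uh;
 *
 *	memset(&hs, 0, sizeof(hs));
 *	uh.HealthStatusBuffer = &hs;
 *	fd = open("/dev/mly0", O_RDWR);		(node name hypothetical)
 *	for (;;) {
 *		if (ioctl(fd, MLYIO_HEALTH, &uh) == -1)
 *			break;			(interrupted or error)
 *		react to the updated hs here; the next call blocks
 *		until hs.change_counter goes stale again
 *	}
 */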
2441