1 /*-
2 * Copyright (c) 1999 Michael Smith
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD: src/sys/dev/mlx/mlx.c,v 1.14.2.5 2001/09/11 09:49:53 kris Exp $
27 */
28
29 /*
30 * Driver for the Mylex DAC960 family of RAID controllers.
31 */
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/malloc.h>
36 #include <sys/kernel.h>
37 #include <sys/bus.h>
38 #include <sys/conf.h>
39 #include <sys/devicestat.h>
40 #include <sys/disk.h>
41 #include <sys/stat.h>
42 #include <sys/rman.h>
43 #include <sys/thread2.h>
44
45 #include <machine/clock.h>
46
47 #include "mlx_compat.h"
48 #include "mlxio.h"
49 #include "mlxvar.h"
50 #include "mlxreg.h"
51
52 static struct dev_ops mlx_ops = {
53 { "mlx", 0, 0 },
54 .d_open = mlx_open,
55 .d_close = mlx_close,
56 .d_ioctl = mlx_ioctl,
57 };
58
59 devclass_t mlx_devclass;
60
61 /*
62 * Per-interface accessor methods
63 */
64 static int mlx_v3_tryqueue(struct mlx_softc *sc, struct mlx_command *mc);
65 static int mlx_v3_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status);
66 static void mlx_v3_intaction(struct mlx_softc *sc, int action);
67 static int mlx_v3_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2);
68
69 static int mlx_v4_tryqueue(struct mlx_softc *sc, struct mlx_command *mc);
70 static int mlx_v4_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status);
71 static void mlx_v4_intaction(struct mlx_softc *sc, int action);
72 static int mlx_v4_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2);
73
74 static int mlx_v5_tryqueue(struct mlx_softc *sc, struct mlx_command *mc);
75 static int mlx_v5_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status);
76 static void mlx_v5_intaction(struct mlx_softc *sc, int action);
77 static int mlx_v5_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2);
78
79 /*
80 * Status monitoring
81 */
82 static void mlx_periodic(void *data);
83 static void mlx_periodic_enquiry(struct mlx_command *mc);
84 static void mlx_periodic_eventlog_poll(struct mlx_softc *sc);
85 static void mlx_periodic_eventlog_respond(struct mlx_command *mc);
86 static void mlx_periodic_rebuild(struct mlx_command *mc);
87
88 /*
89 * Channel Pause
90 */
91 static void mlx_pause_action(struct mlx_softc *sc);
92 static void mlx_pause_done(struct mlx_command *mc);
93
94 /*
95 * Command submission.
96 */
97 static void *mlx_enquire(struct mlx_softc *sc, int command, size_t bufsize,
98 void (*complete)(struct mlx_command *mc));
99 static int mlx_flush(struct mlx_softc *sc);
100 static int mlx_check(struct mlx_softc *sc, int drive);
101 static int mlx_rebuild(struct mlx_softc *sc, int channel, int target);
102 static int mlx_wait_command(struct mlx_command *mc);
103 static int mlx_poll_command(struct mlx_command *mc);
104 static void mlx_startio(struct mlx_softc *sc);
105 static void mlx_completeio(struct mlx_command *mc);
106 static int mlx_user_command(struct mlx_softc *sc, struct mlx_usercommand *mu);
107
108 /*
109 * Command buffer allocation.
110 */
111 static struct mlx_command *mlx_alloccmd(struct mlx_softc *sc);
112 static void mlx_releasecmd(struct mlx_command *mc);
113 static void mlx_freecmd(struct mlx_command *mc);
114
115 /*
116 * Command management.
117 */
118 static int mlx_getslot(struct mlx_command *mc);
119 static void mlx_mapcmd(struct mlx_command *mc);
120 static void mlx_unmapcmd(struct mlx_command *mc);
121 static int mlx_start(struct mlx_command *mc);
122 static int mlx_done(struct mlx_softc *sc);
123 static void mlx_complete(struct mlx_softc *sc);
124
125 /*
126 * Debugging.
127 */
128 static char *mlx_diagnose_command(struct mlx_command *mc);
129 static void mlx_describe_controller(struct mlx_softc *sc);
130 static int mlx_fw_message(struct mlx_softc *sc, int status, int param1, int param2);
131
132 /*
133 * Utility functions.
134 */
135 static struct mlx_sysdrive *mlx_findunit(struct mlx_softc *sc, int unit);
136
137 /********************************************************************************
138 ********************************************************************************
139 Public Interfaces
140 ********************************************************************************
141 ********************************************************************************/
142
143 /********************************************************************************
144 * Free all of the resources associated with (sc)
145 *
146 * Should not be called if the controller is active.
147 */
148 void
mlx_free(struct mlx_softc *sc)
150 {
151 struct mlx_command *mc;
152
153 debug_called(1);
154
155 /* cancel status timeout */
156 callout_stop(&sc->mlx_timeout);
157
158 /* throw away any command buffers */
159 while ((mc = TAILQ_FIRST(&sc->mlx_freecmds)) != NULL) {
160 TAILQ_REMOVE(&sc->mlx_freecmds, mc, mc_link);
161 mlx_freecmd(mc);
162 }
163
164 /* destroy data-transfer DMA tag */
165 if (sc->mlx_buffer_dmat)
166 bus_dma_tag_destroy(sc->mlx_buffer_dmat);
167
168 /* free and destroy DMA memory and tag for s/g lists */
169 if (sc->mlx_sgtable)
170 bus_dmamem_free(sc->mlx_sg_dmat, sc->mlx_sgtable, sc->mlx_sg_dmamap);
171 if (sc->mlx_sg_dmat)
172 bus_dma_tag_destroy(sc->mlx_sg_dmat);
173
174 /* disconnect the interrupt handler */
175 if (sc->mlx_intr)
176 bus_teardown_intr(sc->mlx_dev, sc->mlx_irq, sc->mlx_intr);
177 if (sc->mlx_irq != NULL)
178 bus_release_resource(sc->mlx_dev, SYS_RES_IRQ, 0, sc->mlx_irq);
179
180 /* destroy the parent DMA tag */
181 if (sc->mlx_parent_dmat)
182 bus_dma_tag_destroy(sc->mlx_parent_dmat);
183
184 /* release the register window mapping */
185 if (sc->mlx_mem != NULL)
186 bus_release_resource(sc->mlx_dev, sc->mlx_mem_type, sc->mlx_mem_rid, sc->mlx_mem);
187
188 /* free controller enquiry data */
189 if (sc->mlx_enq2 != NULL)
190 kfree(sc->mlx_enq2, M_DEVBUF);
191
192 dev_ops_remove_minor(&mlx_ops, device_get_unit(sc->mlx_dev));
193 }
194
195 /********************************************************************************
196 * Map the scatter/gather table into bus space
197 */
198 static void
mlx_dma_map_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
200 {
201 struct mlx_softc *sc = (struct mlx_softc *)arg;
202
203 debug_called(1);
204
205 /* save base of s/g table's address in bus space */
206 sc->mlx_sgbusaddr = segs->ds_addr;
207 }
208
209 static int
mlx_sglist_map(struct mlx_softc *sc)
211 {
212 size_t segsize;
213 int error, ncmd;
214
215 debug_called(1);
216
217 /* destroy any existing mappings */
218 if (sc->mlx_sgtable)
219 bus_dmamem_free(sc->mlx_sg_dmat, sc->mlx_sgtable, sc->mlx_sg_dmamap);
220 if (sc->mlx_sg_dmat)
221 bus_dma_tag_destroy(sc->mlx_sg_dmat);
222
223 /*
224 * Create a single tag describing a region large enough to hold all of
225 * the s/g lists we will need. If we're called early on, we don't know how
226 * many commands we're going to be asked to support, so only allocate enough
227 * for a couple.
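* (mlx_attach() calls mlx_sglist_map() a second time, with the controller's real
* command count from ENQUIRY2, once the probe commands have been run.)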
228 */
229 if (sc->mlx_enq2 == NULL) {
230 ncmd = 2;
231 } else {
232 ncmd = sc->mlx_enq2->me_max_commands;
233 }
234 segsize = sizeof(struct mlx_sgentry) * MLX_NSEG * ncmd;
235 error = bus_dma_tag_create(sc->mlx_parent_dmat, /* parent */
236 1, 0, /* alignment, boundary */
237 BUS_SPACE_MAXADDR, /* lowaddr */
238 BUS_SPACE_MAXADDR, /* highaddr */
239 segsize, 1, /* maxsize, nsegments */
240 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
241 0, /* flags */
242 &sc->mlx_sg_dmat);
243 if (error != 0) {
244 device_printf(sc->mlx_dev, "can't allocate scatter/gather DMA tag\n");
245 return(ENOMEM);
246 }
247
248 /*
249 * Allocate enough s/g maps for all commands and permanently map them into
250 * controller-visible space.
251 *
252 * XXX this assumes we can get enough space for all the s/g maps in one
253 * contiguous slab. We may need to switch to a more complex arrangement where
254 * we allocate in smaller chunks and keep a lookup table from slot to bus address.
255 */
256 error = bus_dmamem_alloc(sc->mlx_sg_dmat, (void *)&sc->mlx_sgtable, BUS_DMA_NOWAIT, &sc->mlx_sg_dmamap);
257 if (error) {
258 device_printf(sc->mlx_dev, "can't allocate s/g table\n");
259 return(ENOMEM);
260 }
261 bus_dmamap_load(sc->mlx_sg_dmat, sc->mlx_sg_dmamap, sc->mlx_sgtable, segsize, mlx_dma_map_sg, sc, 0);
262 return(0);
263 }
264
265 /********************************************************************************
266 * Initialise the controller and softc
267 */
268 int
mlx_attach(struct mlx_softc *sc)
270 {
271 struct mlx_enquiry_old *meo;
272 int rid, error, fwminor, hscode, hserror, hsparam1, hsparam2, hsmsg;
273
274 debug_called(1);
275 callout_init(&sc->mlx_timeout);
276
277 /*
278 * Initialise per-controller queues.
279 */
280 TAILQ_INIT(&sc->mlx_work);
281 TAILQ_INIT(&sc->mlx_freecmds);
282 bioq_init(&sc->mlx_bioq);
283
284 /*
285 * Select accessor methods based on controller interface type.
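* (Interface types 2 and 3 share the V3 register-level interface.)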
286 */
287 switch(sc->mlx_iftype) {
288 case MLX_IFTYPE_2:
289 case MLX_IFTYPE_3:
290 sc->mlx_tryqueue = mlx_v3_tryqueue;
291 sc->mlx_findcomplete = mlx_v3_findcomplete;
292 sc->mlx_intaction = mlx_v3_intaction;
293 sc->mlx_fw_handshake = mlx_v3_fw_handshake;
294 break;
295 case MLX_IFTYPE_4:
296 sc->mlx_tryqueue = mlx_v4_tryqueue;
297 sc->mlx_findcomplete = mlx_v4_findcomplete;
298 sc->mlx_intaction = mlx_v4_intaction;
299 sc->mlx_fw_handshake = mlx_v4_fw_handshake;
300 break;
301 case MLX_IFTYPE_5:
302 sc->mlx_tryqueue = mlx_v5_tryqueue;
303 sc->mlx_findcomplete = mlx_v5_findcomplete;
304 sc->mlx_intaction = mlx_v5_intaction;
305 sc->mlx_fw_handshake = mlx_v5_fw_handshake;
306 break;
307 default:
308 mlx_free(sc);
309 return(ENXIO); /* should never happen */
310 }
311
312 /* disable interrupts before we start talking to the controller */
313 sc->mlx_intaction(sc, MLX_INTACTION_DISABLE);
314
315 /*
316 * Wait for the controller to come ready, handshake with the firmware if required.
317 * This is typically only necessary on platforms where the controller BIOS does not
318 * run.
319 */
320 hsmsg = 0;
321 DELAY(1000);
322 while ((hscode = sc->mlx_fw_handshake(sc, &hserror, &hsparam1, &hsparam2)) != 0) {
323 /* report first time around... */
324 if (hsmsg == 0) {
325 device_printf(sc->mlx_dev, "controller initialisation in progress...\n");
326 hsmsg = 1;
327 }
328 /* did we get a real message? */
329 if (hscode == 2) {
330 hscode = mlx_fw_message(sc, hserror, hsparam1, hsparam2);
331 /* fatal initialisation error? */
332 if (hscode != 0) {
333 mlx_free(sc);
334 return(ENXIO);
335 }
336 }
337 }
338 if (hsmsg == 1)
339 device_printf(sc->mlx_dev, "initialisation complete.\n");
340
341 /*
342 * Allocate and connect our interrupt.
343 */
344 rid = 0;
345 sc->mlx_irq = bus_alloc_resource(sc->mlx_dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE);
346 if (sc->mlx_irq == NULL) {
347 device_printf(sc->mlx_dev, "can't allocate interrupt\n");
348 mlx_free(sc);
349 return(ENXIO);
350 }
351 error = bus_setup_intr(sc->mlx_dev, sc->mlx_irq,
352 0, mlx_intr, sc,
353 &sc->mlx_intr, NULL);
354 if (error) {
355 device_printf(sc->mlx_dev, "can't set up interrupt\n");
356 mlx_free(sc);
357 return(ENXIO);
358 }
359
360 /*
361 * Create DMA tag for mapping buffers into controller-addressable space.
362 */
363 error = bus_dma_tag_create(sc->mlx_parent_dmat, /* parent */
364 1, 0, /* alignment, boundary */
365 BUS_SPACE_MAXADDR, /* lowaddr */
366 BUS_SPACE_MAXADDR, /* highaddr */
367 MAXBSIZE, MLX_NSEG, /* maxsize, nsegments */
368 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
369 0, /* flags */
370 &sc->mlx_buffer_dmat);
371 if (error != 0) {
372 device_printf(sc->mlx_dev, "can't allocate buffer DMA tag\n");
373 mlx_free(sc);
374 return(ENOMEM);
375 }
376
377 /*
378 * Create some initial scatter/gather mappings so we can run the probe commands.
379 */
380 error = mlx_sglist_map(sc);
381 if (error != 0) {
382 device_printf(sc->mlx_dev, "can't make initial s/g list mapping\n");
383 mlx_free(sc);
384 return(error);
385 }
386
387 /*
388 * We don't (yet) know where the event log is up to.
389 */
390 sc->mlx_currevent = -1;
391
392 /*
393 * Obtain controller feature information
394 */
395 if ((sc->mlx_enq2 = mlx_enquire(sc, MLX_CMD_ENQUIRY2, sizeof(struct mlx_enquiry2), NULL)) == NULL) {
396 device_printf(sc->mlx_dev, "ENQUIRY2 failed\n");
397 mlx_free(sc);
398 return(ENXIO);
399 }
400
401 /*
402 * Do quirk/feature related things.
403 */
404 fwminor = (sc->mlx_enq2->me_firmware_id >> 8) & 0xff;
405 switch(sc->mlx_iftype) {
406 case MLX_IFTYPE_2:
407 /* These controllers don't report the firmware version in the ENQUIRY2 response */
408 if ((meo = mlx_enquire(sc, MLX_CMD_ENQUIRY_OLD, sizeof(struct mlx_enquiry_old), NULL)) == NULL) {
409 device_printf(sc->mlx_dev, "ENQUIRY_OLD failed\n");
410 mlx_free(sc);
411 return(ENXIO);
412 }
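/* synthesise an ENQUIRY2-style firmware ID word: major revision in the low byte, minor in the next byte up */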
413 sc->mlx_enq2->me_firmware_id = ('0' << 24) | (0 << 16) | (meo->me_fwminor << 8) | meo->me_fwmajor;
414
415 /* XXX require 2.42 or better (PCI) or 2.14 or better (EISA) */
416 if (meo->me_fwminor < 42) {
417 device_printf(sc->mlx_dev, " *** WARNING *** This firmware revision is not recommended\n");
418 device_printf(sc->mlx_dev, " *** WARNING *** Use revision 2.42 or later\n");
419 }
420 kfree(meo, M_DEVBUF);
421 break;
422 case MLX_IFTYPE_3:
423 /* XXX certify 3.52? */
424 if (fwminor < 51) {
425 device_printf(sc->mlx_dev, " *** WARNING *** This firmware revision is not recommended\n");
426 device_printf(sc->mlx_dev, " *** WARNING *** Use revision 3.51 or later\n");
427 }
428 break;
429 case MLX_IFTYPE_4:
430 /* XXX certify firmware versions? */
431 if (fwminor < 6) {
432 device_printf(sc->mlx_dev, " *** WARNING *** This firmware revision is not recommended\n");
433 device_printf(sc->mlx_dev, " *** WARNING *** Use revision 4.06 or later\n");
434 }
435 break;
436 case MLX_IFTYPE_5:
437 if (fwminor < 7) {
438 device_printf(sc->mlx_dev, " *** WARNING *** This firmware revision is not recommended\n");
439 device_printf(sc->mlx_dev, " *** WARNING *** Use revision 5.07 or later\n");
440 }
441 break;
442 default:
443 mlx_free(sc);
444 return(ENXIO); /* should never happen */
445 }
446
447 /*
448 * Create the final scatter/gather mappings now that we have characterised the controller.
449 */
450 error = mlx_sglist_map(sc);
451 if (error != 0) {
452 device_printf(sc->mlx_dev, "can't make final s/g list mapping\n");
453 mlx_free(sc);
454 return(error);
455 }
456
457 /*
458 * No user-requested background operation is in progress.
459 */
460 sc->mlx_background = 0;
461 sc->mlx_rebuildstat.rs_code = MLX_REBUILDSTAT_IDLE;
462
463 /*
464 * Create the control device.
465 */
466 make_dev(&mlx_ops, device_get_unit(sc->mlx_dev),
467 UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
468 "mlx%d", device_get_unit(sc->mlx_dev));
469
470 /*
471 * Start the timeout routine.
472 */
473 callout_reset(&sc->mlx_timeout, hz, mlx_periodic, sc);
474
475 /* print a little information about the controller */
476 mlx_describe_controller(sc);
477
478 return(0);
479 }
480
481 /********************************************************************************
482 * Locate disk resources and attach children to them.
483 */
484 void
mlx_startup(struct mlx_softc *sc)
486 {
487 struct mlx_enq_sys_drive *mes;
488 struct mlx_sysdrive *dr;
489 int i, error;
490
491 debug_called(1);
492
493 /*
494 * Scan all the system drives and attach children for those that
495 * don't currently have them.
496 */
497 mes = mlx_enquire(sc, MLX_CMD_ENQSYSDRIVE, sizeof(*mes) * MLX_MAXDRIVES, NULL);
498 if (mes == NULL) {
499 device_printf(sc->mlx_dev, "error fetching drive status\n");
500 return;
501 }
502
503 /* iterate over drives returned */
504 for (i = 0, dr = &sc->mlx_sysdrive[0];
505 (i < MLX_MAXDRIVES) && (mes[i].sd_size != 0xffffffff);
506 i++, dr++) {
507 /* are we already attached to this drive? */
508 if (dr->ms_disk == NULL) {
509 /* pick up drive information */
510 dr->ms_size = mes[i].sd_size;
511 dr->ms_raidlevel = mes[i].sd_raidlevel & 0xf;
512 dr->ms_state = mes[i].sd_state;
513
514 /* generate geometry information */
515 if (sc->mlx_geom == MLX_GEOM_128_32) {
516 dr->ms_heads = 128;
517 dr->ms_sectors = 32;
518 dr->ms_cylinders = dr->ms_size / (128 * 32);
519 } else { /* MLX_GEOM_255/63 */
520 dr->ms_heads = 255;
521 dr->ms_sectors = 63;
522 dr->ms_cylinders = dr->ms_size / (255 * 63);
523 }
524 dr->ms_disk = device_add_child(sc->mlx_dev, /*"mlxd"*/NULL, -1);
525 if (dr->ms_disk == NULL)
526 device_printf(sc->mlx_dev, "device_add_child failed\n");
527 device_set_ivars(dr->ms_disk, dr);
528 }
529 }
530 kfree(mes, M_DEVBUF);
531 if ((error = bus_generic_attach(sc->mlx_dev)) != 0)
device_printf(sc->mlx_dev, "bus_generic_attach returned %d\n", error);
533
534 /* mark controller back up */
535 sc->mlx_state &= ~MLX_STATE_SHUTDOWN;
536
537 /* enable interrupts */
538 sc->mlx_intaction(sc, MLX_INTACTION_ENABLE);
539 }
540
541 /********************************************************************************
542 * Disconnect from the controller completely, in preparation for unload.
543 */
544 int
mlx_detach(device_t dev)
546 {
547 struct mlx_softc *sc = device_get_softc(dev);
548 struct mlxd_softc *mlxd;
549 int i, error;
550
551 debug_called(1);
552
553 error = EBUSY;
554 crit_enter();
555 if (sc->mlx_state & MLX_STATE_OPEN)
556 goto out;
557
558 for (i = 0; i < MLX_MAXDRIVES; i++) {
559 if (sc->mlx_sysdrive[i].ms_disk != NULL) {
560 mlxd = device_get_softc(sc->mlx_sysdrive[i].ms_disk);
561 if (mlxd->mlxd_flags & MLXD_OPEN) { /* drive is mounted, abort detach */
562 device_printf(sc->mlx_sysdrive[i].ms_disk, "still open, can't detach\n");
563 goto out;
564 }
565 }
566 }
567 if ((error = mlx_shutdown(dev)))
568 goto out;
569
570 mlx_free(sc);
571
572 error = 0;
573 out:
574 crit_exit();
575 return(error);
576 }
577
578 /********************************************************************************
579 * Bring the controller down to a dormant state and detach all child devices.
580 *
581 * This function is called before detach, system shutdown, or before performing
582 * an operation which may add or delete system disks. (Call mlx_startup to
583 * resume normal operation.)
584 *
585 * Note that we can assume that the bioq on the controller is empty, as we won't
586 * allow shutdown if any device is open.
587 */
588 int
mlx_shutdown(device_t dev)
590 {
591 struct mlx_softc *sc = device_get_softc(dev);
592 int i, error;
593
594 debug_called(1);
595
596 crit_enter();
597 error = 0;
598
599 sc->mlx_state |= MLX_STATE_SHUTDOWN;
600 sc->mlx_intaction(sc, MLX_INTACTION_DISABLE);
601
602 /* flush controller */
603 device_printf(sc->mlx_dev, "flushing cache...");
604 if (mlx_flush(sc)) {
605 kprintf("failed\n");
606 } else {
607 kprintf("done\n");
608 }
609
610 /* delete all our child devices */
611 for (i = 0; i < MLX_MAXDRIVES; i++) {
612 if (sc->mlx_sysdrive[i].ms_disk != NULL) {
613 if ((error = device_delete_child(sc->mlx_dev, sc->mlx_sysdrive[i].ms_disk)) != 0)
614 goto out;
615 sc->mlx_sysdrive[i].ms_disk = NULL;
616 }
617 }
618
619 out:
620 crit_exit();
621 return(error);
622 }
623
624 /********************************************************************************
625 * Bring the controller to a quiescent state, ready for system suspend.
626 */
627 int
mlx_suspend(device_t dev)
629 {
630 struct mlx_softc *sc = device_get_softc(dev);
631
632 debug_called(1);
633
634 crit_enter();
635 sc->mlx_state |= MLX_STATE_SUSPEND;
636
637 /* flush controller */
638 device_printf(sc->mlx_dev, "flushing cache...");
639 kprintf("%s\n", mlx_flush(sc) ? "failed" : "done");
640
641 sc->mlx_intaction(sc, MLX_INTACTION_DISABLE);
642 crit_exit();
643
644 return(0);
645 }
646
647 /********************************************************************************
648 * Bring the controller back to a state ready for operation.
649 */
650 int
mlx_resume(device_t dev)
652 {
653 struct mlx_softc *sc = device_get_softc(dev);
654
655 debug_called(1);
656
657 sc->mlx_state &= ~MLX_STATE_SUSPEND;
658 sc->mlx_intaction(sc, MLX_INTACTION_ENABLE);
659
660 return(0);
661 }
662
663 /*******************************************************************************
664 * Take an interrupt, or be poked by other code to look for interrupt-worthy
665 * status.
666 */
667 void
mlx_intr(void *arg)
669 {
670 struct mlx_softc *sc = (struct mlx_softc *)arg;
671
672 debug_called(1);
673
674 /* collect finished commands, queue anything waiting */
675 mlx_done(sc);
676 }
677
678 /*******************************************************************************
679 * Receive a buf structure from a child device and queue it on a particular
680 * disk resource, then poke the disk resource to start as much work as it can.
681 */
682 int
mlx_submit_bio(struct mlx_softc *sc, struct bio *bio)
684 {
685 debug_called(1);
686
687 crit_enter();
688 bioqdisksort(&sc->mlx_bioq, bio);
689 sc->mlx_waitbufs++;
690 crit_exit();
691 mlx_startio(sc);
692 return(0);
693 }
694
695 /********************************************************************************
696 * Accept an open operation on the control device.
697 */
698 int
mlx_open(struct dev_open_args *ap)
700 {
701 cdev_t dev = ap->a_head.a_dev;
702 int unit = minor(dev);
703 struct mlx_softc *sc = devclass_get_softc(mlx_devclass, unit);
704
705 sc->mlx_state |= MLX_STATE_OPEN;
706 return(0);
707 }
708
709 /********************************************************************************
710 * Accept the last close on the control device.
711 */
712 int
mlx_close(struct dev_close_args *ap)
714 {
715 cdev_t dev = ap->a_head.a_dev;
716 int unit = minor(dev);
717 struct mlx_softc *sc = devclass_get_softc(mlx_devclass, unit);
718
719 sc->mlx_state &= ~MLX_STATE_OPEN;
720 return (0);
721 }
722
723 /********************************************************************************
724 * Handle controller-specific control operations.
725 */
726 int
mlx_ioctl(struct dev_ioctl_args *ap)
728 {
729 cdev_t dev = ap->a_head.a_dev;
730 int unit = minor(dev);
731 struct mlx_softc *sc = devclass_get_softc(mlx_devclass, unit);
732 struct mlx_rebuild_request *rb = (struct mlx_rebuild_request *)ap->a_data;
733 struct mlx_rebuild_status *rs = (struct mlx_rebuild_status *)ap->a_data;
734 int *arg = (int *)ap->a_data;
735 struct mlx_pause *mp;
736 struct mlx_sysdrive *dr;
737 struct mlxd_softc *mlxd;
738 int i, error;
739
740 switch(ap->a_cmd) {
741 /*
742 * Enumerate connected system drives; returns the first system drive's
743 * unit number if *arg is -1, or the next unit after *arg if it's
744 * a valid unit on this controller.
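*
* A userland consumer (e.g. the mlxcontrol(8) utility) would typically walk the
* attached drives with something like the following (a sketch, not taken
* verbatim from mlxcontrol):
*
*	int unit = -1;
*	while (ioctl(fd, MLX_NEXT_CHILD, &unit) == 0)
*		...operate on mlxd<unit>...
*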
745 */
746 case MLX_NEXT_CHILD:
747 /* search system drives */
748 for (i = 0; i < MLX_MAXDRIVES; i++) {
749 /* is this one attached? */
750 if (sc->mlx_sysdrive[i].ms_disk != NULL) {
751 /* looking for the next one we come across? */
752 if (*arg == -1) {
*arg = device_get_unit(sc->mlx_sysdrive[i].ms_disk);
754 return(0);
755 }
756 /* we want the one after this one */
757 if (*arg == device_get_unit(sc->mlx_sysdrive[i].ms_disk))
758 *arg = -1;
759 }
760 }
761 return(ENOENT);
762
763 /*
764 * Scan the controller to see whether new drives have appeared.
765 */
766 case MLX_RESCAN_DRIVES:
767 mlx_startup(sc);
768 return(0);
769
770 /*
771 * Disconnect from the specified drive; it may be about to go
772 * away.
773 */
774 case MLX_DETACH_DRIVE: /* detach one drive */
775
776 if (((dr = mlx_findunit(sc, *arg)) == NULL) ||
777 ((mlxd = device_get_softc(dr->ms_disk)) == NULL))
778 return(ENOENT);
779
780 device_printf(dr->ms_disk, "detaching...");
781 error = 0;
782 if (mlxd->mlxd_flags & MLXD_OPEN) {
783 error = EBUSY;
784 goto detach_out;
785 }
786
787 /* flush controller */
788 if (mlx_flush(sc)) {
789 error = EBUSY;
790 goto detach_out;
791 }
792
793 /* nuke drive */
794 if ((error = device_delete_child(sc->mlx_dev, dr->ms_disk)) != 0)
795 goto detach_out;
796 dr->ms_disk = NULL;
797
798 detach_out:
799 if (error) {
800 kprintf("failed\n");
801 } else {
802 kprintf("done\n");
803 }
804 return(error);
805
806 /*
807 * Pause one or more SCSI channels for a period of time, to assist
808 * in the process of hot-swapping devices.
809 *
810 * Note that at least the 3.51 firmware on the DAC960PL doesn't seem
811 * to do this right.
812 */
813 case MLX_PAUSE_CHANNEL: /* schedule a channel pause */
814 /* Does this command work on this firmware? */
815 if (!(sc->mlx_feature & MLX_FEAT_PAUSEWORKS))
816 return(EOPNOTSUPP);
817
818 mp = (struct mlx_pause *)ap->a_data;
819 if ((mp->mp_which == MLX_PAUSE_CANCEL) && (sc->mlx_pause.mp_when != 0)) {
820 /* cancel a pending pause operation */
821 sc->mlx_pause.mp_which = 0;
822 } else {
823 /* fix for legal channels */
824 mp->mp_which &= ((1 << sc->mlx_enq2->me_actual_channels) -1);
825 /* check time values */
826 if ((mp->mp_when < 0) || (mp->mp_when > 3600))
827 return(EINVAL);
828 if ((mp->mp_howlong < 1) || (mp->mp_howlong > (0xf * 30)))
829 return(EINVAL);
830
831 /* check for a pause currently running */
832 if ((sc->mlx_pause.mp_which != 0) && (sc->mlx_pause.mp_when == 0))
833 return(EBUSY);
834
835 /* looks ok, go with it */
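/* record the pause as absolute times: mp_when is the start, mp_howlong the end (seconds of uptime) */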
836 sc->mlx_pause.mp_which = mp->mp_which;
837 sc->mlx_pause.mp_when = time_uptime + mp->mp_when;
838 sc->mlx_pause.mp_howlong = sc->mlx_pause.mp_when + mp->mp_howlong;
839 }
840 return(0);
841
842 /*
843 * Accept a command passthrough-style.
844 */
845 case MLX_COMMAND:
846 return(mlx_user_command(sc, (struct mlx_usercommand *)ap->a_data));
847
848 /*
849 * Start a rebuild on a given SCSI disk
850 */
851 case MLX_REBUILDASYNC:
852 if (sc->mlx_background != 0) {
853 rb->rr_status = 0x0106;
854 return(EBUSY);
855 }
856 rb->rr_status = mlx_rebuild(sc, rb->rr_channel, rb->rr_target);
857 switch (rb->rr_status) {
858 case 0:
859 error = 0;
860 break;
861 case 0x10000:
862 error = ENOMEM; /* couldn't set up the command */
863 break;
864 case 0x0002:
865 error = EBUSY;
866 break;
867 case 0x0104:
868 error = EIO;
869 break;
870 case 0x0105:
871 error = ERANGE;
872 break;
873 case 0x0106:
874 error = EBUSY;
875 break;
876 default:
877 error = EINVAL;
878 break;
879 }
880 if (error == 0)
881 sc->mlx_background = MLX_BACKGROUND_REBUILD;
882 return(error);
883
884 /*
885 * Get the status of the current rebuild or consistency check.
886 */
887 case MLX_REBUILDSTAT:
888 *rs = sc->mlx_rebuildstat;
889 return(0);
890
891 /*
892 * Return the per-controller system drive number matching the
893 * disk device number in (arg), if it happens to belong to us.
894 */
895 case MLX_GET_SYSDRIVE:
896 error = ENOENT;
897 mlxd = (struct mlxd_softc *)devclass_get_softc(mlxd_devclass, *arg);
898 if ((mlxd != NULL) && (mlxd->mlxd_drive >= sc->mlx_sysdrive) &&
899 (mlxd->mlxd_drive < (sc->mlx_sysdrive + MLX_MAXDRIVES))) {
900 error = 0;
901 *arg = mlxd->mlxd_drive - sc->mlx_sysdrive;
902 }
903 return(error);
904
905 default:
906 return(ENOTTY);
907 }
908 }
909
910 /********************************************************************************
911 * Handle operations requested by a System Drive connected to this controller.
912 */
913 int
mlx_submit_ioctl(struct mlx_softc *sc, struct mlx_sysdrive *drive, u_long cmd,
    caddr_t addr, int32_t flag)
916 {
917 int *arg = (int *)addr;
918 int error, result;
919
920 switch(cmd) {
921 /*
922 * Return the current status of this drive.
923 */
924 case MLXD_STATUS:
925 *arg = drive->ms_state;
926 return(0);
927
928 /*
929 * Start a background consistency check on this drive.
930 */
931 case MLXD_CHECKASYNC: /* start a background consistency check */
932 if (sc->mlx_background != 0) {
933 *arg = 0x0106;
934 return(EBUSY);
935 }
936 result = mlx_check(sc, drive - &sc->mlx_sysdrive[0]);
937 switch (result) {
938 case 0:
939 error = 0;
940 break;
941 case 0x10000:
942 error = ENOMEM; /* couldn't set up the command */
943 break;
944 case 0x0002:
945 error = EIO;
946 break;
947 case 0x0105:
948 error = ERANGE;
949 break;
950 case 0x0106:
951 error = EBUSY;
952 break;
953 default:
954 error = EINVAL;
955 break;
956 }
957 if (error == 0)
958 sc->mlx_background = MLX_BACKGROUND_CHECK;
959 *arg = result;
960 return(error);
961
962 }
963 return(ENOIOCTL);
964 }
965
966
967 /********************************************************************************
968 ********************************************************************************
969 Status Monitoring
970 ********************************************************************************
971 ********************************************************************************/
972
973 /********************************************************************************
974 * Fire off commands to periodically check the status of connected drives.
975 */
976 static void
mlx_periodic(void *data)
978 {
979 struct mlx_softc *sc = (struct mlx_softc *)data;
980
981 debug_called(1);
982
983 /*
984 * Run a bus pause?
985 */
986 if ((sc->mlx_pause.mp_which != 0) &&
987 (sc->mlx_pause.mp_when > 0) &&
988 (time_uptime >= sc->mlx_pause.mp_when)){
989
990 mlx_pause_action(sc); /* pause is running */
991 sc->mlx_pause.mp_when = 0;
992 sysbeep(500, hz);
993
994 /*
995 * Bus pause still running?
996 */
997 } else if ((sc->mlx_pause.mp_which != 0) &&
998 (sc->mlx_pause.mp_when == 0)) {
999
1000 /* time to stop bus pause? */
1001 if (time_uptime >= sc->mlx_pause.mp_howlong) {
1002 mlx_pause_action(sc);
1003 sc->mlx_pause.mp_which = 0; /* pause is complete */
1004 sysbeep(500, hz);
1005 } else {
1006 sysbeep((time_uptime % 5) * 100 + 500, hz/8);
1007 }
1008
1009 /*
1010 * Run normal periodic activities?
1011 */
1012 } else if (time_uptime > (sc->mlx_lastpoll + 10)) {
1013 sc->mlx_lastpoll = time_uptime;
1014
1015 /*
1016 * Check controller status.
1017 *
1018 * XXX Note that this may not actually launch a command in situations of high load.
1019 */
1020 mlx_enquire(sc, (sc->mlx_iftype == MLX_IFTYPE_2) ? MLX_CMD_ENQUIRY_OLD : MLX_CMD_ENQUIRY,
1021 imax(sizeof(struct mlx_enquiry), sizeof(struct mlx_enquiry_old)), mlx_periodic_enquiry);
1022
1023 /*
1024 * Check system drive status.
1025 *
1026 * XXX This might be better left to event-driven detection, eg. I/O to an offline
1027 * drive will detect it's offline, rebuilds etc. should detect the drive is back
1028 * online.
1029 */
1030 mlx_enquire(sc, MLX_CMD_ENQSYSDRIVE, sizeof(struct mlx_enq_sys_drive) * MLX_MAXDRIVES,
1031 mlx_periodic_enquiry);
1032
1033 }
1034
1035 /* get drive rebuild/check status */
1036 /* XXX should check sc->mlx_background if this is only valid while in progress */
1037 mlx_enquire(sc, MLX_CMD_REBUILDSTAT, sizeof(struct mlx_rebuild_stat), mlx_periodic_rebuild);
1038
1039 /* deal with possibly-missed interrupts and timed-out commands */
1040 mlx_done(sc);
1041
1042 /* reschedule another poll next second or so */
1043 callout_reset(&sc->mlx_timeout, hz, mlx_periodic, sc);
1044 }
1045
1046 /********************************************************************************
1047 * Handle the result of an ENQUIRY command instigated by periodic status polling.
1048 */
1049 static void
mlx_periodic_enquiry(struct mlx_command *mc)
1051 {
1052 struct mlx_softc *sc = mc->mc_sc;
1053
1054 debug_called(1);
1055
1056 /* Command completed OK? */
1057 if (mc->mc_status != 0) {
1058 device_printf(sc->mlx_dev, "periodic enquiry failed - %s\n", mlx_diagnose_command(mc));
1059 goto out;
1060 }
1061
1062 /* respond to command */
1063 switch(mc->mc_mailbox[0]) {
1064 /*
1065 * This is currently a bit fruitless, as we don't know how to extract the eventlog
1066 * pointer yet.
1067 */
1068 case MLX_CMD_ENQUIRY_OLD:
1069 {
1070 struct mlx_enquiry *me = (struct mlx_enquiry *)mc->mc_data;
1071 struct mlx_enquiry_old *meo = (struct mlx_enquiry_old *)mc->mc_data;
1072 int i;
1073
1074 /* convert data in-place to new format */
1075 for (i = NELEM(me->me_dead) - 1; i >= 0; i--) {
1076 me->me_dead[i].dd_chan = meo->me_dead[i].dd_chan;
1077 me->me_dead[i].dd_targ = meo->me_dead[i].dd_targ;
1078 }
1079 me->me_misc_flags = 0;
1080 me->me_rebuild_count = meo->me_rebuild_count;
1081 me->me_dead_count = meo->me_dead_count;
1082 me->me_critical_sd_count = meo->me_critical_sd_count;
1083 me->me_event_log_seq_num = 0;
1084 me->me_offline_sd_count = meo->me_offline_sd_count;
1085 me->me_max_commands = meo->me_max_commands;
1086 me->me_rebuild_flag = meo->me_rebuild_flag;
1087 me->me_fwmajor = meo->me_fwmajor;
1088 me->me_fwminor = meo->me_fwminor;
1089 me->me_status_flags = meo->me_status_flags;
1090 me->me_flash_age = meo->me_flash_age;
1091 for (i = NELEM(me->me_drvsize) - 1; i >= 0; i--) {
1092 if (i > (NELEM(meo->me_drvsize) - 1)) {
1093 me->me_drvsize[i] = 0; /* drive beyond supported range */
1094 } else {
1095 me->me_drvsize[i] = meo->me_drvsize[i];
1096 }
1097 }
1098 me->me_num_sys_drvs = meo->me_num_sys_drvs;
1099 }
1100 /* FALLTHROUGH */
1101
1102 /*
1103 * Generic controller status update. We could do more with this than just
1104 * checking the event log.
1105 */
1106 case MLX_CMD_ENQUIRY:
1107 {
1108 struct mlx_enquiry *me = (struct mlx_enquiry *)mc->mc_data;
1109
1110 if (sc->mlx_currevent == -1) {
1111 /* initialise our view of the event log */
1112 sc->mlx_currevent = sc->mlx_lastevent = me->me_event_log_seq_num;
1113 } else if ((me->me_event_log_seq_num != sc->mlx_lastevent) && !(sc->mlx_flags & MLX_EVENTLOG_BUSY)) {
1114 /* record where current events are up to */
1115 sc->mlx_currevent = me->me_event_log_seq_num;
1116 debug(1, "event log pointer was %d, now %d\n", sc->mlx_lastevent, sc->mlx_currevent);
1117
1118 /* mark the event log as busy */
1119 atomic_set_int(&sc->mlx_flags, MLX_EVENTLOG_BUSY);
1120
1121 /* drain new eventlog entries */
1122 mlx_periodic_eventlog_poll(sc);
1123 }
1124 break;
1125 }
1126 case MLX_CMD_ENQSYSDRIVE:
1127 {
1128 struct mlx_enq_sys_drive *mes = (struct mlx_enq_sys_drive *)mc->mc_data;
1129 struct mlx_sysdrive *dr;
1130 int i;
1131
1132 for (i = 0, dr = &sc->mlx_sysdrive[0];
1133 (i < MLX_MAXDRIVES) && (mes[i].sd_size != 0xffffffff);
i++, dr++) {
1135
1136 /* has state been changed by controller? */
1137 if (dr->ms_state != mes[i].sd_state) {
1138 switch(mes[i].sd_state) {
1139 case MLX_SYSD_OFFLINE:
1140 device_printf(dr->ms_disk, "drive offline\n");
1141 break;
1142 case MLX_SYSD_ONLINE:
1143 device_printf(dr->ms_disk, "drive online\n");
1144 break;
1145 case MLX_SYSD_CRITICAL:
1146 device_printf(dr->ms_disk, "drive critical\n");
1147 break;
1148 }
1149 /* save new state */
1150 dr->ms_state = mes[i].sd_state;
1151 }
1152 }
1153 break;
1154 }
1155 default:
1156 device_printf(sc->mlx_dev, "%s: unknown command 0x%x", __func__, mc->mc_mailbox[0]);
1157 break;
1158 }
1159
1160 out:
1161 kfree(mc->mc_data, M_DEVBUF);
1162 mlx_releasecmd(mc);
1163 }
1164
1165 /********************************************************************************
1166 * Instigate a poll for one event log message on (sc).
1167 * We only poll for one message at a time, to keep our command usage down.
1168 */
1169 static void
mlx_periodic_eventlog_poll(struct mlx_softc *sc)
1171 {
1172 struct mlx_command *mc;
1173 void *result = NULL;
1174 int error;
1175
1176 debug_called(1);
1177
1178 /* get ourselves a command buffer */
1179 error = 1;
1180 if ((mc = mlx_alloccmd(sc)) == NULL)
1181 goto out;
1182 /*
1183 * allocate the response structure - sizeof(struct mlx_eventlog_entry)?
1184 * Called from timeout - use M_NOWAIT (repoll later on failure?)
1185 */
1186 if ((result = kmalloc(1024, M_DEVBUF, M_NOWAIT)) == NULL)
1187 goto out;
1188 /* get a command slot */
1189 if (mlx_getslot(mc))
1190 goto out;
1191
1192 /* map the command so the controller can see it */
1193 mc->mc_data = result;
1194 mc->mc_length = /*sizeof(struct mlx_eventlog_entry)*/1024;
1195 mlx_mapcmd(mc);
1196
1197 /* build the command to get one entry */
1198 mlx_make_type3(mc, MLX_CMD_LOGOP, MLX_LOGOP_GET, 1, sc->mlx_lastevent, 0, 0, mc->mc_dataphys, 0);
1199 mc->mc_complete = mlx_periodic_eventlog_respond;
1200 mc->mc_private = mc;
1201
1202 /* start the command */
1203 if ((error = mlx_start(mc)) != 0)
1204 goto out;
1205
1206 error = 0; /* success */
1207 out:
1208 if (error != 0) {
1209 if (mc != NULL)
1210 mlx_releasecmd(mc);
1211 if (result != NULL)
1212 kfree(result, M_DEVBUF);
1213 }
1214 }
1215
1216 /********************************************************************************
1217 * Handle the result of polling for a log message, generate diagnostic output.
1218 * If this wasn't the last message waiting for us, we'll go collect another.
1219 */
1220 static char *mlx_sense_messages[] = {
1221 "because write recovery failed",
1222 "because of SCSI bus reset failure",
1223 "because of double check condition",
1224 "because it was removed",
1225 "because of gross error on SCSI chip",
1226 "because of bad tag returned from drive",
1227 "because of timeout on SCSI command",
1228 "because of reset SCSI command issued from system",
1229 "because busy or parity error count exceeded limit",
1230 "because of 'kill drive' command from system",
1231 "because of selection timeout",
1232 "due to SCSI phase sequence error",
1233 "due to unknown status"
1234 };
1235
1236 static void
mlx_periodic_eventlog_respond(struct mlx_command *mc)
1238 {
1239 struct mlx_softc *sc = mc->mc_sc;
1240 struct mlx_eventlog_entry *el = (struct mlx_eventlog_entry *)mc->mc_data;
1241 char *reason;
1242 char hexstr[2][12];
1243
1244 debug_called(1);
1245
1246 sc->mlx_lastevent++; /* next message... */
1247 if (mc->mc_status == 0) {
1248
1249 /* handle event log message */
1250 switch(el->el_type) {
1251 /*
1252 * This is the only sort of message we understand at the moment.
1253 * The tests here are probably incomplete.
1254 */
1255 case MLX_LOGMSG_SENSE: /* sense data */
1256 /* Mylex vendor-specific message indicating a drive was killed? */
1257 if ((el->el_sensekey == 9) &&
1258 (el->el_asc == 0x80)) {
1259 if (el->el_asq < NELEM(mlx_sense_messages)) {
1260 reason = mlx_sense_messages[el->el_asq];
1261 } else {
1262 reason = "for unknown reason";
1263 }
1264 device_printf(sc->mlx_dev, "physical drive %d:%d killed %s\n",
1265 el->el_channel, el->el_target, reason);
1266 }
1267 /* SCSI drive was reset? */
1268 if ((el->el_sensekey == 6) && (el->el_asc == 0x29)) {
1269 device_printf(sc->mlx_dev, "physical drive %d:%d reset\n",
1270 el->el_channel, el->el_target);
1271 }
1272 /* SCSI drive error? */
1273 if (!((el->el_sensekey == 0) ||
1274 ((el->el_sensekey == 2) &&
1275 (el->el_asc == 0x04) &&
1276 ((el->el_asq == 0x01) ||
1277 (el->el_asq == 0x02))))) {
1278 device_printf(sc->mlx_dev, "physical drive %d:%d error log: sense = %d asc = %x asq = %x\n",
1279 el->el_channel, el->el_target, el->el_sensekey, el->el_asc, el->el_asq);
1280 device_printf(sc->mlx_dev, " info %s csi %s\n", hexncpy(el->el_information, 4, hexstr[0], 12, ":"),
1281 hexncpy(el->el_csi, 4, hexstr[1], 12, ":"));
1282 }
1283 break;
1284
1285 default:
1286 device_printf(sc->mlx_dev, "unknown log message type 0x%x\n", el->el_type);
1287 break;
1288 }
1289 } else {
1290 device_printf(sc->mlx_dev, "error reading message log - %s\n", mlx_diagnose_command(mc));
1291 /* give up on all the outstanding messages, as we may have come unsynched */
1292 sc->mlx_lastevent = sc->mlx_currevent;
1293 }
1294
1295 /* dispose of command and data */
1296 kfree(mc->mc_data, M_DEVBUF);
1297 mlx_releasecmd(mc);
1298
1299 /* is there another message to obtain? */
1300 if (sc->mlx_lastevent != sc->mlx_currevent) {
1301 mlx_periodic_eventlog_poll(sc);
1302 } else {
1303 /* clear log-busy status */
1304 atomic_clear_int(&sc->mlx_flags, MLX_EVENTLOG_BUSY);
1305 }
1306 }
1307
1308 /********************************************************************************
1309 * Handle check/rebuild operations in progress.
1310 */
1311 static void
mlx_periodic_rebuild(struct mlx_command *mc)
1313 {
1314 struct mlx_softc *sc = mc->mc_sc;
1315 struct mlx_rebuild_status *mr = (struct mlx_rebuild_status *)mc->mc_data;
1316
1317 switch(mc->mc_status) {
1318 case 0: /* operation running, update stats */
1319 sc->mlx_rebuildstat = *mr;
1320
1321 /* spontaneous rebuild/check? */
1322 if (sc->mlx_background == 0) {
1323 sc->mlx_background = MLX_BACKGROUND_SPONTANEOUS;
1324 device_printf(sc->mlx_dev, "background check/rebuild operation started\n");
1325 }
1326 break;
1327
1328 case 0x0105: /* nothing running, finalise stats and report */
1329 switch(sc->mlx_background) {
1330 case MLX_BACKGROUND_CHECK:
1331 device_printf(sc->mlx_dev, "consistency check completed\n"); /* XXX print drive? */
1332 break;
1333 case MLX_BACKGROUND_REBUILD:
1334 device_printf(sc->mlx_dev, "drive rebuild completed\n"); /* XXX print channel/target? */
1335 break;
1336 case MLX_BACKGROUND_SPONTANEOUS:
1337 default:
1338 /* if we have previously been non-idle, report the transition */
1339 if (sc->mlx_rebuildstat.rs_code != MLX_REBUILDSTAT_IDLE) {
1340 device_printf(sc->mlx_dev, "background check/rebuild operation completed\n");
1341 }
1342 }
1343 sc->mlx_background = 0;
1344 sc->mlx_rebuildstat.rs_code = MLX_REBUILDSTAT_IDLE;
1345 break;
1346 }
1347 kfree(mc->mc_data, M_DEVBUF);
1348 mlx_releasecmd(mc);
1349 }
1350
1351 /********************************************************************************
1352 ********************************************************************************
1353 Channel Pause
1354 ********************************************************************************
1355 ********************************************************************************/
1356
1357 /********************************************************************************
1358 * It's time to perform a channel pause action for (sc), either start or stop
1359 * the pause.
1360 */
1361 static void
mlx_pause_action(struct mlx_softc *sc)
1363 {
1364 struct mlx_command *mc;
1365 int failsafe, i, command;
1366
1367 /* What are we doing here? */
1368 if (sc->mlx_pause.mp_when == 0) {
1369 command = MLX_CMD_STARTCHANNEL;
1370 failsafe = 0;
1371
1372 } else {
1373 command = MLX_CMD_STOPCHANNEL;
1374
1375 /*
1376 * Channels will always start again after the failsafe period,
1377 * which is specified in multiples of 30 seconds.
1378 * This constrains us to a maximum pause of 450 seconds.
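* For example, a pause with 85 seconds left to run maps to (85 + 5) / 30 = 3
* failsafe units, i.e. the channels restart after 90 seconds regardless.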
1379 */
1380 failsafe = ((sc->mlx_pause.mp_howlong - time_uptime) + 5) / 30;
1381 if (failsafe > 0xf) {
1382 failsafe = 0xf;
1383 sc->mlx_pause.mp_howlong = time_uptime + (0xf * 30) - 5;
1384 }
1385 }
1386
1387 /* build commands for every channel requested */
1388 for (i = 0; i < sc->mlx_enq2->me_actual_channels; i++) {
1389 if ((1 << i) & sc->mlx_pause.mp_which) {
1390
1391 /* get ourselves a command buffer */
1392 if ((mc = mlx_alloccmd(sc)) == NULL)
1393 goto fail;
1394 /* get a command slot */
1395 mc->mc_flags |= MLX_CMD_PRIORITY;
1396 if (mlx_getslot(mc))
1397 goto fail;
1398
1399 /* build the command */
1400 mlx_make_type2(mc, command, (failsafe << 4) | i, 0, 0, 0, 0, 0, 0, 0);
1401 mc->mc_complete = mlx_pause_done;
1402 mc->mc_private = sc; /* XXX not needed */
1403 if (mlx_start(mc))
1404 goto fail;
1405 /* command submitted OK */
1406 return;
1407
1408 fail:
1409 device_printf(sc->mlx_dev, "%s failed for channel %d\n",
1410 command == MLX_CMD_STOPCHANNEL ? "pause" : "resume", i);
1411 if (mc != NULL)
1412 mlx_releasecmd(mc);
1413 }
1414 }
1415 }
1416
1417 static void
mlx_pause_done(struct mlx_command *mc)
1419 {
1420 struct mlx_softc *sc = mc->mc_sc;
1421 int command = mc->mc_mailbox[0];
1422 int channel = mc->mc_mailbox[2] & 0xf;
1423
1424 if (mc->mc_status != 0) {
1425 device_printf(sc->mlx_dev, "%s command failed - %s\n",
1426 command == MLX_CMD_STOPCHANNEL ? "pause" : "resume", mlx_diagnose_command(mc));
1427 } else if (command == MLX_CMD_STOPCHANNEL) {
1428 device_printf(sc->mlx_dev, "channel %d pausing for %ld seconds\n",
1429 channel, (long)(sc->mlx_pause.mp_howlong - time_uptime));
1430 } else {
1431 device_printf(sc->mlx_dev, "channel %d resuming\n", channel);
1432 }
1433 mlx_releasecmd(mc);
1434 }
1435
1436 /********************************************************************************
1437 ********************************************************************************
1438 Command Submission
1439 ********************************************************************************
1440 ********************************************************************************/
1441
1442 /********************************************************************************
* Perform an Enquiry command using a type-3 command buffer and return a single
1444 * linear result buffer. If the completion function is specified, it will
1445 * be called with the completed command (and the result response will not be
1446 * valid until that point). Otherwise, the command will either be busy-waited
1447 * for (interrupts not enabled), or slept for.
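* The result buffer is allocated from M_DEVBUF; the caller (or the completion
* handler) is responsible for kfree()ing it.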
1448 */
1449 static void *
mlx_enquire(struct mlx_softc *sc, int command, size_t bufsize, void (*complete)(struct mlx_command *mc))
1451 {
1452 struct mlx_command *mc;
1453 void *result;
1454 int error;
1455
1456 debug_called(1);
1457
1458 /* get ourselves a command buffer */
1459 error = 1;
1460 result = NULL;
1461 if ((mc = mlx_alloccmd(sc)) == NULL)
1462 goto out;
1463 /* allocate the response structure */
1464 result = kmalloc(bufsize, M_DEVBUF, M_INTWAIT);
1465 /* get a command slot */
1466 mc->mc_flags |= MLX_CMD_PRIORITY | MLX_CMD_DATAOUT;
1467 if (mlx_getslot(mc))
1468 goto out;
1469
1470 /* map the command so the controller can see it */
1471 mc->mc_data = result;
1472 mc->mc_length = bufsize;
1473 mlx_mapcmd(mc);
1474
1475 /* build an enquiry command */
1476 mlx_make_type2(mc, command, 0, 0, 0, 0, 0, 0, mc->mc_dataphys, 0);
1477
1478 /* do we want a completion callback? */
1479 if (complete != NULL) {
1480 mc->mc_complete = complete;
1481 mc->mc_private = mc;
1482 if ((error = mlx_start(mc)) != 0)
1483 goto out;
1484 } else {
1485 /* run the command in either polled or wait mode */
1486 if ((sc->mlx_state & MLX_STATE_INTEN) ? mlx_wait_command(mc) : mlx_poll_command(mc))
1487 goto out;
1488
1489 /* command completed OK? */
1490 if (mc->mc_status != 0) {
1491 device_printf(sc->mlx_dev, "ENQUIRY failed - %s\n", mlx_diagnose_command(mc));
1492 goto out;
1493 }
1494 }
1495 error = 0; /* success */
1496 out:
1497 /* we got a command, but nobody else will free it */
1498 if ((complete == NULL) && (mc != NULL))
1499 mlx_releasecmd(mc);
1500 /* we got an error, and we allocated a result */
1501 if ((error != 0) && (result != NULL)) {
1502 kfree(result, M_DEVBUF);
1503 result = NULL;
1504 }
1505 return(result);
1506 }
1507
1508
1509 /********************************************************************************
1510 * Perform a Flush command on the nominated controller.
1511 *
1512 * May be called with interrupts enabled or disabled; will not return until
1513 * the flush operation completes or fails.
1514 */
1515 static int
mlx_flush(struct mlx_softc *sc)
1517 {
1518 struct mlx_command *mc;
1519 int error;
1520
1521 debug_called(1);
1522
1523 /* get ourselves a command buffer */
1524 error = 1;
1525 if ((mc = mlx_alloccmd(sc)) == NULL)
1526 goto out;
1527 /* get a command slot */
1528 if (mlx_getslot(mc))
1529 goto out;
1530
1531 /* build a flush command */
1532 mlx_make_type2(mc, MLX_CMD_FLUSH, 0, 0, 0, 0, 0, 0, 0, 0);
1533
1534 /* can't assume that interrupts are going to work here, so play it safe */
1535 if (mlx_poll_command(mc))
1536 goto out;
1537
1538 /* command completed OK? */
1539 if (mc->mc_status != 0) {
1540 device_printf(sc->mlx_dev, "FLUSH failed - %s\n", mlx_diagnose_command(mc));
1541 goto out;
1542 }
1543
1544 error = 0; /* success */
1545 out:
1546 if (mc != NULL)
1547 mlx_releasecmd(mc);
1548 return(error);
1549 }
1550
1551 /********************************************************************************
1552 * Start a background consistency check on (drive).
1553 *
1554 * May be called with interrupts enabled or disabled; will return as soon as the
1555 * operation has started or been refused.
1556 */
1557 static int
mlx_check(struct mlx_softc *sc, int drive)
1559 {
1560 struct mlx_command *mc;
1561 int error;
1562
1563 debug_called(1);
1564
1565 /* get ourselves a command buffer */
1566 error = 0x10000;
1567 if ((mc = mlx_alloccmd(sc)) == NULL)
1568 goto out;
1569 /* get a command slot */
1570 if (mlx_getslot(mc))
1571 goto out;
1572
1573 /* build a checkasync command, set the "fix it" flag */
1574 mlx_make_type2(mc, MLX_CMD_CHECKASYNC, 0, 0, 0, 0, 0, drive | 0x80, 0, 0);
1575
1576 /* start the command and wait for it to be returned */
1577 if (mlx_wait_command(mc))
1578 goto out;
1579
1580 /* command completed OK? */
1581 if (mc->mc_status != 0) {
1582 device_printf(sc->mlx_dev, "CHECK ASYNC failed - %s\n", mlx_diagnose_command(mc));
1583 } else {
device_printf(sc->mlx_sysdrive[drive].ms_disk, "consistency check started\n");
1585 }
1586 error = mc->mc_status;
1587
1588 out:
1589 if (mc != NULL)
1590 mlx_releasecmd(mc);
1591 return(error);
1592 }
1593
1594 /********************************************************************************
1595 * Start a background rebuild of the physical drive at (channel),(target).
1596 *
1597 * May be called with interrupts enabled or disabled; will return as soon as the
1598 * operation has started or been refused.
1599 */
1600 static int
mlx_rebuild(struct mlx_softc *sc, int channel, int target)
1602 {
1603 struct mlx_command *mc;
1604 int error;
1605
1606 debug_called(1);
1607
1608 /* get ourselves a command buffer */
1609 error = 0x10000;
1610 if ((mc = mlx_alloccmd(sc)) == NULL)
1611 goto out;
1612 /* get a command slot */
1613 if (mlx_getslot(mc))
1614 goto out;
1615
/* build a rebuildasync command */
1617 mlx_make_type2(mc, MLX_CMD_REBUILDASYNC, channel, target, 0, 0, 0, 0, 0, 0);
1618
1619 /* start the command and wait for it to be returned */
1620 if (mlx_wait_command(mc))
1621 goto out;
1622
1623 /* command completed OK? */
1624 if (mc->mc_status != 0) {
1625 device_printf(sc->mlx_dev, "REBUILD ASYNC failed - %s\n", mlx_diagnose_command(mc));
1626 } else {
1627 device_printf(sc->mlx_dev, "drive rebuild started for %d:%d\n", channel, target);
1628 }
1629 error = mc->mc_status;
1630
1631 out:
1632 if (mc != NULL)
1633 mlx_releasecmd(mc);
1634 return(error);
1635 }
1636
1637 /********************************************************************************
1638 * Run the command (mc) and return when it completes.
1639 *
1640 * Interrupts need to be enabled; returns nonzero on error.
1641 */
1642 static int
mlx_wait_command(struct mlx_command *mc)
1644 {
1645 struct mlx_softc *sc = mc->mc_sc;
1646 int error, count;
1647
1648 debug_called(1);
1649
1650 mc->mc_complete = NULL;
1651 mc->mc_private = mc; /* wake us when you're done */
1652 if ((error = mlx_start(mc)) != 0)
1653 return(error);
1654
1655 count = 0;
1656 /* XXX better timeout? */
while ((mc->mc_status == MLX_STATUS_BUSY) && (count < 30)) {
    tsleep(mc->mc_private, PCATCH, "mlxwcmd", hz);
    count++;
}
1660
1661 if (mc->mc_status != 0) {
1662 device_printf(sc->mlx_dev, "command failed - %s\n", mlx_diagnose_command(mc));
1663 return(EIO);
1664 }
1665 return(0);
1666 }
1667
1668
1669 /********************************************************************************
1670 * Start the command (mc) and busy-wait for it to complete.
1671 *
1672 * Should only be used when interrupts can't be relied upon. Returns 0 on
1673 * success, nonzero on error.
1674 * Successfully completed commands are dequeued.
1675 */
1676 static int
mlx_poll_command(struct mlx_command *mc)
1678 {
1679 struct mlx_softc *sc = mc->mc_sc;
1680 int error, count;
1681
1682 debug_called(1);
1683
1684 mc->mc_complete = NULL;
1685 mc->mc_private = NULL; /* we will poll for it */
1686 if ((error = mlx_start(mc)) != 0)
1687 return(error);
1688
1689 count = 0;
1690 do {
1691 /* poll for completion */
1692 mlx_done(mc->mc_sc);
1693
1694 } while ((mc->mc_status == MLX_STATUS_BUSY) && (count++ < 15000000));
1695 if (mc->mc_status != MLX_STATUS_BUSY) {
1696 crit_enter();
1697 TAILQ_REMOVE(&sc->mlx_work, mc, mc_link);
1698 crit_exit();
1699 return(0);
1700 }
1701 device_printf(sc->mlx_dev, "command failed - %s\n", mlx_diagnose_command(mc));
1702 return(EIO);
1703 }
1704
1705 /********************************************************************************
1706 * Pull as much work off the softc's work queue as possible and give it to the
1707 * controller. Leave a couple of slots free for emergencies.
1708 *
1709 * Must be called at splbio or in an equivalent fashion that prevents
1710 * reentry or activity on the bioq.
1711 */
1712 static void
mlx_startio(struct mlx_softc *sc)
1714 {
1715 struct mlx_command *mc;
1716 struct mlxd_softc *mlxd;
1717 struct bio *bio;
1718 struct buf *bp;
1719 int blkcount;
1720 int driveno;
1721 int cmd;
1722 u_daddr_t blkno;
1723
1724 /* avoid reentrancy */
1725 if (mlx_lock_tas(sc, MLX_LOCK_STARTING))
1726 return;
1727
1728 /* spin until something prevents us from doing any work */
1729 crit_enter();
1730 for (;;) {
1731
1732 /* see if there's work to be done */
1733 if ((bio = bioq_first(&sc->mlx_bioq)) == NULL)
1734 break;
1735 /* get a command */
1736 if ((mc = mlx_alloccmd(sc)) == NULL)
1737 break;
1738 /* get a slot for the command */
1739 if (mlx_getslot(mc) != 0) {
1740 mlx_releasecmd(mc);
1741 break;
1742 }
1743 /* get the buf containing our work */
1744 bioq_remove(&sc->mlx_bioq, bio);
1745 bp = bio->bio_buf;
1746 sc->mlx_waitbufs--;
1747 crit_exit();
1748
1749 /* connect the buf to the command */
1750 mc->mc_complete = mlx_completeio;
mc->mc_private = bio;
        mc->mc_data = bp->b_data;
        mc->mc_length = bp->b_bcount;
        if (bp->b_cmd == BUF_CMD_READ) {
            mc->mc_flags |= MLX_CMD_DATAIN;
            cmd = MLX_CMD_READSG;
        } else {
            mc->mc_flags |= MLX_CMD_DATAOUT;
            cmd = MLX_CMD_WRITESG;
        }

        /* map the command so the controller can work with it */
        mlx_mapcmd(mc);

        /* build a suitable I/O command (assumes 512-byte rounded transfers) */
        mlxd = (struct mlxd_softc *)bio->bio_driver_info;
        driveno = mlxd->mlxd_drive - sc->mlx_sysdrive;
        blkcount = (bp->b_bcount + MLX_BLKSIZE - 1) / MLX_BLKSIZE;
        blkno = bio->bio_offset / MLX_BLKSIZE;

        if ((blkno + blkcount) > sc->mlx_sysdrive[driveno].ms_size)
            device_printf(sc->mlx_dev, "I/O beyond end of unit (%u,%d > %u)\n",
                          blkno, blkcount, sc->mlx_sysdrive[driveno].ms_size);

        /*
         * Build the I/O command.  Note that the SG list type bits are set to zero,
         * denoting the format of SG list that we are using.
         */
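        /*
         * Field packing as used below: the low 8 bits of the block count go
         * in one mailbox byte; for type-5 commands the next byte carries the
         * system drive number in its top 5 bits and the high 3 bits of the
         * block count in its low 3 bits.  The SG list is referenced by its
         * bus address (mc_sgphys), and its entry count is limited to 6 bits
         * (mc_nsgent & 0x3f).
         */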
        if (sc->mlx_iftype == MLX_IFTYPE_2) {
            mlx_make_type1(mc, (cmd == MLX_CMD_WRITESG) ? MLX_CMD_WRITESG_OLD : MLX_CMD_READSG_OLD,
                           blkcount & 0xff, 		/* xfer length low byte */
                           blkno,			/* physical block number */
                           driveno,			/* target drive number */
                           mc->mc_sgphys,		/* location of SG list */
                           mc->mc_nsgent & 0x3f);	/* size of SG list (top 3 bits clear) */
        } else {
            mlx_make_type5(mc, cmd,
                           blkcount & 0xff,		/* xfer length low byte */
                           (driveno << 3) | ((blkcount >> 8) & 0x07),	/* target and length high 3 bits */
                           blkno,			/* physical block number */
                           mc->mc_sgphys,		/* location of SG list */
                           mc->mc_nsgent & 0x3f);	/* size of SG list (top 3 bits clear) */
        }

        /* try to give command to controller */
        if (mlx_start(mc) != 0) {
            /* fail the command */
            mc->mc_status = MLX_STATUS_WEDGED;
            mlx_completeio(mc);
        }
        crit_enter();
    }
    crit_exit();
    mlx_lock_clr(sc, MLX_LOCK_STARTING);
}

/********************************************************************************
 * Handle completion of an I/O command.
 */
static void
mlx_completeio(struct mlx_command *mc)
{
    struct mlx_softc *sc = mc->mc_sc;
    struct bio *bio = (mlx_bio *)mc->mc_private;
    struct mlxd_softc *mlxd = (struct mlxd_softc *)bio->bio_driver_info;
    struct buf *bp = bio->bio_buf;

    if (mc->mc_status != MLX_STATUS_OK) {	/* could be more verbose here? */
        bp->b_error = EIO;
        bp->b_flags |= B_ERROR;

        switch(mc->mc_status) {
        case MLX_STATUS_RDWROFFLINE:		/* system drive has gone offline */
            device_printf(mlxd->mlxd_dev, "drive offline\n");
            /* should signal this with a return code */
            mlxd->mlxd_drive->ms_state = MLX_SYSD_OFFLINE;
            break;

        default:				/* other I/O error */
            device_printf(sc->mlx_dev, "I/O error - %s\n", mlx_diagnose_command(mc));
#if 0
            device_printf(sc->mlx_dev, " b_bcount %ld offset %lld\n",
                          bp->b_bcount, bio->bio_offset);
            device_printf(sc->mlx_dev, " %13D\n", mc->mc_mailbox, " ");
#endif
            break;
        }
    }
    mlx_releasecmd(mc);
    mlxd_intr(bio);
}

/********************************************************************************
 * Take a command from user-space and try to run it.
 *
 * XXX Note that this can't perform very much in the way of error checking, and
 *     as such, applications _must_ be considered trustworthy.
 * XXX Commands using S/G for data are not supported.
 */
static int
mlx_user_command(struct mlx_softc *sc, struct mlx_usercommand *mu)
{
    struct mlx_command *mc;
    struct mlx_dcdb *dcdb;
    void *kbuf;
    int error;

    debug_called(0);

    kbuf = NULL;
    mc = NULL;
    dcdb = NULL;
    error = ENOMEM;

    /* get ourselves a command and copy in from user space */
    if ((mc = mlx_alloccmd(sc)) == NULL)
        goto out;
    bcopy(mu->mu_command, mc->mc_mailbox, sizeof(mc->mc_mailbox));
    debug(0, "got command buffer");

    /* if we need a buffer for data transfer, allocate one and copy in its initial contents */
    if (mu->mu_datasize > 0) {
        if (mu->mu_datasize > MAXPHYS) {
            error = EINVAL;		/* go through 'out' so the command we already allocated is released */
            goto out;
        }
        if (((kbuf = kmalloc(mu->mu_datasize, M_DEVBUF, M_WAITOK)) == NULL) ||
            (error = copyin(mu->mu_buf, kbuf, mu->mu_datasize)))
            goto out;
        debug(0, "got kernel buffer");
    }

    /* get a command slot */
    if (mlx_getslot(mc))
        goto out;
    debug(0, "got a slot");

    /* map the command so the controller can see it */
    mc->mc_data = kbuf;
    mc->mc_length = mu->mu_datasize;
    mlx_mapcmd(mc);
    debug(0, "mapped");

    /*
     * If this is a passthrough SCSI command, the DCDB is packed at the
     * beginning of the data area.  Fix up the DCDB to point to the correct physical
     * address and override any bufptr supplied by the caller since we know
     * what it's meant to be.
     */
    if (mc->mc_mailbox[0] == MLX_CMD_DIRECT_CDB) {
        dcdb = (struct mlx_dcdb *)kbuf;
        dcdb->dcdb_physaddr = mc->mc_dataphys + sizeof(*dcdb);
        mu->mu_bufptr = 8;
    }
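
    /*
     * Note on the fixup above: for MLX_CMD_DIRECT_CDB the caller's buffer
     * starts with the DCDB itself and any transfer data follows it, so the
     * DCDB's physical-address field is pointed just past the DCDB within our
     * kernel buffer.  Byte offset 8 in the mailbox is where the DCDB physical
     * address lives for this command, which is why mu_bufptr is forced to 8
     * regardless of what the caller supplied.
     */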

    /*
     * If there's a data buffer, fix up the command's buffer pointer.
     */
    if (mu->mu_datasize > 0) {

        /* range check the pointer to physical buffer address */
        if ((mu->mu_bufptr < 0) || (mu->mu_bufptr > (sizeof(mu->mu_command) - sizeof(u_int32_t)))) {
            error = EINVAL;
            goto out;
        }
        mc->mc_mailbox[mu->mu_bufptr    ] = mc->mc_dataphys & 0xff;
        mc->mc_mailbox[mu->mu_bufptr + 1] = (mc->mc_dataphys >> 8) & 0xff;
        mc->mc_mailbox[mu->mu_bufptr + 2] = (mc->mc_dataphys >> 16) & 0xff;
        mc->mc_mailbox[mu->mu_bufptr + 3] = (mc->mc_dataphys >> 24) & 0xff;
    }
    debug(0, "command fixup");

    /* submit the command and wait */
    if ((error = mlx_wait_command(mc)) != 0)
        goto out;

    /* copy out status and data */
    mu->mu_status = mc->mc_status;
    if ((mu->mu_datasize > 0) && ((error = copyout(kbuf, mu->mu_buf, mu->mu_datasize))))
        goto out;
    error = 0;

 out:
    if (mc != NULL)			/* command allocation may have failed */
        mlx_releasecmd(mc);
    if (kbuf != NULL)
        kfree(kbuf, M_DEVBUF);
    return(error);
}
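
/*
 * Illustrative note (not part of the original driver): this path is normally
 * exercised from userland management tools through the driver's ioctl
 * interface (see mlxio.h and mlx_ioctl()), which hand in a struct
 * mlx_usercommand containing a raw mailbox image plus an optional data
 * buffer and its size.  Something like the FreeBSD mlxcontrol(8) utility
 * drives rebuilds, status enquiries and passthrough CDBs through exactly
 * this routine.
 */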

/********************************************************************************
 ********************************************************************************
                                                Command I/O to Controller
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Find a free command slot for (mc).
 *
 * Don't hand out a slot to a normal-priority command unless there are at least
 * 4 slots free for priority commands.
 */
static int
mlx_getslot(struct mlx_command *mc)
{
    struct mlx_softc *sc = mc->mc_sc;
    int slot, limit;

    debug_called(1);

    /*
     * Enforce slot-usage limit, if we have the required information.
     */
    if (sc->mlx_enq2 != NULL) {
        limit = sc->mlx_enq2->me_max_commands;
    } else {
        limit = 2;
    }
    if (sc->mlx_busycmds >= ((mc->mc_flags & MLX_CMD_PRIORITY) ? limit : limit - 4))
        return(EBUSY);

    /*
     * Allocate an outstanding command slot
     *
     * XXX linear search is slow
     */
    crit_enter();
    for (slot = 0; slot < limit; slot++) {
        debug(2, "try slot %d", slot);
        if (sc->mlx_busycmd[slot] == NULL)
            break;
    }
    if (slot < limit) {
        sc->mlx_busycmd[slot] = mc;
        sc->mlx_busycmds++;
    }
    crit_exit();

    /* out of slots? */
    if (slot >= limit)
        return(EBUSY);

    debug(2, "got slot %d", slot);
    mc->mc_slot = slot;
    return(0);
}
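
/*
 * Note (added commentary): the mlx_busycmd[] slot array and the mlx_busycmds
 * counter updated here under crit_enter()/crit_exit() are the same bookkeeping
 * that mlx_done() unwinds when a completion for the slot is reaped, and that
 * mlx_start() clears again by hand if the controller never accepts the
 * command.
 */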

/********************************************************************************
 * Map/unmap (mc)'s data in the controller's addressable space.
 */
static void
mlx_setup_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
{
    struct mlx_command *mc = (struct mlx_command *)arg;
    struct mlx_softc *sc = mc->mc_sc;
    struct mlx_sgentry *sg;
    int i;

    debug_called(1);

    /* XXX should be unnecessary */
    if (sc->mlx_enq2 && (nsegments > sc->mlx_enq2->me_max_sg))
        panic("MLX: too many s/g segments (%d, max %d)", nsegments, sc->mlx_enq2->me_max_sg);

    /* get base address of s/g table */
    sg = sc->mlx_sgtable + (mc->mc_slot * MLX_NSEG);

    /* save s/g table information in command */
    mc->mc_nsgent = nsegments;
    mc->mc_sgphys = sc->mlx_sgbusaddr + (mc->mc_slot * MLX_NSEG * sizeof(struct mlx_sgentry));
    mc->mc_dataphys = segs[0].ds_addr;

    /* populate s/g table */
    for (i = 0; i < nsegments; i++, sg++) {
        sg->sg_addr = segs[i].ds_addr;
        sg->sg_count = segs[i].ds_len;
    }
}
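
/*
 * Note (added commentary): each command slot owns a fixed window of MLX_NSEG
 * entries in the shared S/G table, so the callback above can compute both the
 * kernel virtual address (mlx_sgtable + slot * MLX_NSEG) and the bus address
 * (mlx_sgbusaddr + slot * MLX_NSEG * sizeof(struct mlx_sgentry)) of its list
 * without any further allocation or locking.
 */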

static void
mlx_mapcmd(struct mlx_command *mc)
{
    struct mlx_softc *sc = mc->mc_sc;

    debug_called(1);

    /* if the command involves data at all */
    if (mc->mc_data != NULL) {

        /* map the data buffer into bus space and build the s/g list */
        bus_dmamap_load(sc->mlx_buffer_dmat, mc->mc_dmamap, mc->mc_data, mc->mc_length,
                        mlx_setup_dmamap, mc, 0);
        if (mc->mc_flags & MLX_CMD_DATAIN)
            bus_dmamap_sync(sc->mlx_buffer_dmat, mc->mc_dmamap, BUS_DMASYNC_PREREAD);
        if (mc->mc_flags & MLX_CMD_DATAOUT)
            bus_dmamap_sync(sc->mlx_buffer_dmat, mc->mc_dmamap, BUS_DMASYNC_PREWRITE);
    }
}

static void
mlx_unmapcmd(struct mlx_command *mc)
{
    struct mlx_softc *sc = mc->mc_sc;

    debug_called(1);

    /* if the command involved data at all */
    if (mc->mc_data != NULL) {

        if (mc->mc_flags & MLX_CMD_DATAIN)
            bus_dmamap_sync(sc->mlx_buffer_dmat, mc->mc_dmamap, BUS_DMASYNC_POSTREAD);
        if (mc->mc_flags & MLX_CMD_DATAOUT)
            bus_dmamap_sync(sc->mlx_buffer_dmat, mc->mc_dmamap, BUS_DMASYNC_POSTWRITE);

        bus_dmamap_unload(sc->mlx_buffer_dmat, mc->mc_dmamap);
    }
}
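
/*
 * Note (added commentary): the PREREAD/PREWRITE syncs in mlx_mapcmd() and the
 * matching POSTREAD/POSTWRITE syncs above bracket the controller's DMA: the
 * direction recorded in mc_flags (MLX_CMD_DATAIN/MLX_CMD_DATAOUT) at command
 * build time determines which pair is issued, and the map is torn down only
 * after the post-DMA sync.
 */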

/********************************************************************************
 * Try to deliver (mc) to the controller.
 *
 * Can be called at any interrupt level, with or without interrupts enabled.
 */
static int
mlx_start(struct mlx_command *mc)
{
    struct mlx_softc *sc = mc->mc_sc;
    int i, done;

    debug_called(1);

    /* save the slot number as ident so we can handle this command when complete */
    mc->mc_mailbox[0x1] = mc->mc_slot;

    /* mark the command as currently being processed */
    mc->mc_status = MLX_STATUS_BUSY;

    /* set a default 60-second timeout  XXX tunable?  XXX not currently used */
    mc->mc_timeout = time_uptime + 60;

    /* spin waiting for the mailbox */
    for (i = 100000, done = 0; (i > 0) && !done; i--) {
        crit_enter();
        if (sc->mlx_tryqueue(sc, mc)) {
            done = 1;
            /* move command to work queue */
            TAILQ_INSERT_TAIL(&sc->mlx_work, mc, mc_link);
        }
        crit_exit();	/* drop spl to allow completion interrupts */
    }

    /* command is enqueued */
    if (done)
        return(0);

    /*
     * We couldn't get the controller to take the command.  Revoke the slot
     * that the command was given and return it with a bad status.
     */
    sc->mlx_busycmd[mc->mc_slot] = NULL;
    device_printf(sc->mlx_dev, "controller wedged (not taking commands)\n");
    mc->mc_status = MLX_STATUS_WEDGED;
    mlx_complete(sc);
    return(EIO);
}
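
/*
 * Note (added commentary): the 100000-iteration loop above retries the
 * per-interface tryqueue hook (mlx_v3/v4/v5_tryqueue) until the mailbox is
 * free; dropping the critical section on each pass lets completion interrupts
 * drain the controller in the meantime.  Only if the mailbox never empties is
 * the command marked MLX_STATUS_WEDGED and failed back to its caller.
 */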

/********************************************************************************
 * Poll the controller (sc) for completed commands.
 * Update command status and free slots for reuse.  If any slots were freed,
 * new commands may be posted.
 *
 * Returns nonzero if one or more commands were completed.
 */
static int
mlx_done(struct mlx_softc *sc)
{
    struct mlx_command *mc;
    int result;
    u_int8_t slot;
    u_int16_t status;

    debug_called(2);

    result = 0;

    /* loop collecting completed commands */
    crit_enter();
    for (;;) {
        /* poll for a completed command's identifier and status */
        if (sc->mlx_findcomplete(sc, &slot, &status)) {
            result = 1;
            mc = sc->mlx_busycmd[slot];			/* find command */
            if (mc != NULL) {				/* paranoia */
                if (mc->mc_status == MLX_STATUS_BUSY) {
                    mc->mc_status = status;		/* save status */

                    /* free slot for reuse */
                    sc->mlx_busycmd[slot] = NULL;
                    sc->mlx_busycmds--;
                } else {
                    device_printf(sc->mlx_dev, "duplicate done event for slot %d\n", slot);
                }
            } else {
                device_printf(sc->mlx_dev, "done event for nonbusy slot %d\n", slot);
            }
        } else {
            break;
        }
    }
    crit_exit();

    /* if we've completed any commands, try posting some more */
    if (result)
        mlx_startio(sc);

    /* handle completion and timeouts */
    mlx_complete(sc);

    return(result);
}

/********************************************************************************
 * Perform post-completion processing for commands on (sc).
 */
static void
mlx_complete(struct mlx_softc *sc)
{
    struct mlx_command *mc, *nc;

    debug_called(2);

    /* avoid reentrancy  XXX might want to signal and request a restart */
    if (mlx_lock_tas(sc, MLX_LOCK_COMPLETING))
        return;

    crit_enter();

    /* scan the list of busy/done commands */
    mc = TAILQ_FIRST(&sc->mlx_work);
    while (mc != NULL) {
        nc = TAILQ_NEXT(mc, mc_link);

        /* Command has been completed in some fashion */
        if (mc->mc_status != MLX_STATUS_BUSY) {

            /* unmap the command's data buffer */
            mlx_unmapcmd(mc);
            /*
             * Does the command have a completion handler?
             */
            if (mc->mc_complete != NULL) {
                /* remove from list and give to handler */
                TAILQ_REMOVE(&sc->mlx_work, mc, mc_link);
                mc->mc_complete(mc);

            /*
             * Is there a sleeper waiting on this command?
             */
            } else if (mc->mc_private != NULL) {	/* sleeping caller wants to know about it */

                /* remove from list and wake up sleeper */
                TAILQ_REMOVE(&sc->mlx_work, mc, mc_link);
                wakeup_one(mc->mc_private);

            /*
             * Leave the command for a caller that's polling for it.
             */
            } else {
            }
        }
        mc = nc;
    }
    crit_exit();

    mlx_lock_clr(sc, MLX_LOCK_COMPLETING);
}
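
/*
 * Note (added commentary): a completed command is handed back in one of three
 * ways above - to its mc_complete callback (the I/O and periodic-status
 * paths), to a sleeper parked on mc_private (the mlx_wait_command() path,
 * woken via wakeup_one()), or it is simply left on the work queue for a
 * polling caller such as mlx_poll_command() to dequeue itself.
 */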

/********************************************************************************
 ********************************************************************************
                                                Command Buffer Management
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Get a new command buffer.
 *
 * This may return NULL in low-memory cases.
 *
 * Note that using malloc() is expensive (the command buffer is << 1 page) but
 * necessary if we are to be a loadable module before the zone allocator is fixed.
 *
 * If possible, we recycle a command buffer that's been used before.
 *
 * XXX Note that command buffers are not cleaned out - it is the caller's
 *     responsibility to ensure that all required fields are filled in before
 *     using a buffer.
 */
static struct mlx_command *
mlx_alloccmd(struct mlx_softc *sc)
{
    struct mlx_command *mc;
    int error;

    debug_called(1);

    crit_enter();
    if ((mc = TAILQ_FIRST(&sc->mlx_freecmds)) != NULL)
        TAILQ_REMOVE(&sc->mlx_freecmds, mc, mc_link);
    crit_exit();

    /* allocate a new command buffer? */
    if (mc == NULL) {
        mc = kmalloc(sizeof(*mc), M_DEVBUF, M_INTWAIT | M_ZERO);
        mc->mc_sc = sc;
        error = bus_dmamap_create(sc->mlx_buffer_dmat, 0, &mc->mc_dmamap);
        if (error) {
            kfree(mc, M_DEVBUF);
            return(NULL);
        }
    }
    return(mc);
}

/********************************************************************************
 * Release a command buffer for recycling.
 *
 * XXX It might be a good idea to limit the number of commands we save for reuse
 *     if it's shown that this list bloats out massively.
 */
static void
mlx_releasecmd(struct mlx_command *mc)
{
    debug_called(1);

    crit_enter();
    TAILQ_INSERT_HEAD(&mc->mc_sc->mlx_freecmds, mc, mc_link);
    crit_exit();
}

/********************************************************************************
 * Permanently discard a command buffer.
 */
static void
mlx_freecmd(struct mlx_command *mc)
{
    struct mlx_softc *sc = mc->mc_sc;

    debug_called(1);
    bus_dmamap_destroy(sc->mlx_buffer_dmat, mc->mc_dmamap);
    kfree(mc, M_DEVBUF);
}


/********************************************************************************
 ********************************************************************************
                                        Type 3 interface accessor methods
 ********************************************************************************
 ********************************************************************************/
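
/*
 * Note (added commentary): all three register interfaces below follow the
 * same doorbell protocol - check that the inbound doorbell says the mailbox
 * is free, copy the 13 mailbox bytes into the register window, then ring the
 * doorbell to submit; completion is signalled through the outbound doorbell,
 * from which the slot ident and 16-bit status are read and then acknowledged.
 * Only the register offsets, bit definitions and polarity differ between the
 * V3, V4 and V5 flavours.
 */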

/********************************************************************************
 * Try to give (mc) to the controller.  Returns 1 if successful, 0 on failure
 * (the controller is not ready to take a command).
 *
 * Must be called at splbio or in a fashion that prevents reentry.
 */
static int
mlx_v3_tryqueue(struct mlx_softc *sc, struct mlx_command *mc)
{
    int i;

    debug_called(2);

    /* ready for our command? */
    if (!(MLX_V3_GET_IDBR(sc) & MLX_V3_IDB_FULL)) {
        /* copy mailbox data to window */
        for (i = 0; i < 13; i++)
            MLX_V3_PUT_MAILBOX(sc, i, mc->mc_mailbox[i]);

        /* post command */
        MLX_V3_PUT_IDBR(sc, MLX_V3_IDB_FULL);
        return(1);
    }
    return(0);
}

/********************************************************************************
 * See if a command has been completed, if so acknowledge its completion
 * and recover the slot number and status code.
 *
 * Must be called at splbio or in a fashion that prevents reentry.
 */
static int
mlx_v3_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status)
{

    debug_called(2);

    /* status available? */
    if (MLX_V3_GET_ODBR(sc) & MLX_V3_ODB_SAVAIL) {
        *slot = MLX_V3_GET_STATUS_IDENT(sc);		/* get command identifier */
        *status = MLX_V3_GET_STATUS(sc);		/* get status */

        /* acknowledge completion */
        MLX_V3_PUT_ODBR(sc, MLX_V3_ODB_SAVAIL);
        MLX_V3_PUT_IDBR(sc, MLX_V3_IDB_SACK);
        return(1);
    }
    return(0);
}

/********************************************************************************
 * Enable/disable interrupts as requested.  (No acknowledge required)
 *
 * Must be called at splbio or in a fashion that prevents reentry.
 */
static void
mlx_v3_intaction(struct mlx_softc *sc, int action)
{
    debug_called(1);

    switch(action) {
    case MLX_INTACTION_DISABLE:
        MLX_V3_PUT_IER(sc, 0);
        sc->mlx_state &= ~MLX_STATE_INTEN;
        break;
    case MLX_INTACTION_ENABLE:
        MLX_V3_PUT_IER(sc, 1);
        sc->mlx_state |= MLX_STATE_INTEN;
        break;
    }
}

/********************************************************************************
 * Poll for firmware error codes during controller initialisation.
 * Returns 0 if initialisation is complete, 1 if still in progress but no
 * error has been fetched, 2 if an error has been retrieved.
 */
static int
mlx_v3_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2)
{
    u_int8_t fwerror;
    static int initted = 0;

    debug_called(2);

    /* first time around, clear any hardware completion status */
    if (!initted) {
        MLX_V3_PUT_IDBR(sc, MLX_V3_IDB_SACK);
        DELAY(1000);
        initted = 1;
    }

    /* init in progress? */
    if (!(MLX_V3_GET_IDBR(sc) & MLX_V3_IDB_INIT_BUSY))
        return(0);

    /* test error value */
    fwerror = MLX_V3_GET_FWERROR(sc);
    if (!(fwerror & MLX_V3_FWERROR_PEND))
        return(1);

    /* mask status pending bit, fetch status */
    *error = fwerror & ~MLX_V3_FWERROR_PEND;
    *param1 = MLX_V3_GET_FWERROR_PARAM1(sc);
    *param2 = MLX_V3_GET_FWERROR_PARAM2(sc);

    /* acknowledge */
    MLX_V3_PUT_FWERROR(sc, 0);

    return(2);
}
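
/*
 * Illustrative sketch (not part of the original driver): the attach-time code
 * is expected to loop on the per-interface handshake hook (installed alongside
 * mlx_tryqueue/mlx_findcomplete) until it reports 0, feeding any retrieved
 * codes to mlx_fw_message(); hscode/err/p1/p2 below are hypothetical names:
 *
 *	while ((hscode = sc->mlx_fw_handshake(sc, &err, &p1, &p2)) != 0) {
 *		if (hscode == 2 && mlx_fw_message(sc, err, p1, p2))
 *			break;			// fatal firmware error
 *		DELAY(1000);
 *	}
 */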

/********************************************************************************
 ********************************************************************************
                                        Type 4 interface accessor methods
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Try to give (mc) to the controller.  Returns 1 if successful, 0 on failure
 * (the controller is not ready to take a command).
 *
 * Must be called at splbio or in a fashion that prevents reentry.
 */
static int
mlx_v4_tryqueue(struct mlx_softc *sc, struct mlx_command *mc)
{
    int i;

    debug_called(2);

    /* ready for our command? */
    if (!(MLX_V4_GET_IDBR(sc) & MLX_V4_IDB_FULL)) {
        /* copy mailbox data to window */
        for (i = 0; i < 13; i++)
            MLX_V4_PUT_MAILBOX(sc, i, mc->mc_mailbox[i]);

        /* memory-mapped controller, so issue a write barrier to ensure the mailbox is filled */
        bus_space_barrier(sc->mlx_btag, sc->mlx_bhandle, MLX_V4_MAILBOX, MLX_V4_MAILBOX_LENGTH,
                          BUS_SPACE_BARRIER_WRITE);

        /* post command */
        MLX_V4_PUT_IDBR(sc, MLX_V4_IDB_HWMBOX_CMD);
        return(1);
    }
    return(0);
}

/********************************************************************************
 * See if a command has been completed, if so acknowledge its completion
 * and recover the slot number and status code.
 *
 * Must be called at splbio or in a fashion that prevents reentry.
 */
static int
mlx_v4_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status)
{

    debug_called(2);

    /* status available? */
    if (MLX_V4_GET_ODBR(sc) & MLX_V4_ODB_HWSAVAIL) {
        *slot = MLX_V4_GET_STATUS_IDENT(sc);		/* get command identifier */
        *status = MLX_V4_GET_STATUS(sc);		/* get status */

        /* acknowledge completion */
        MLX_V4_PUT_ODBR(sc, MLX_V4_ODB_HWMBOX_ACK);
        MLX_V4_PUT_IDBR(sc, MLX_V4_IDB_SACK);
        return(1);
    }
    return(0);
}

/********************************************************************************
 * Enable/disable interrupts as requested.
 *
 * Must be called at splbio or in a fashion that prevents reentry.
 */
static void
mlx_v4_intaction(struct mlx_softc *sc, int action)
{
    debug_called(1);

    switch(action) {
    case MLX_INTACTION_DISABLE:
        MLX_V4_PUT_IER(sc, MLX_V4_IER_MASK | MLX_V4_IER_DISINT);
        sc->mlx_state &= ~MLX_STATE_INTEN;
        break;
    case MLX_INTACTION_ENABLE:
        MLX_V4_PUT_IER(sc, MLX_V4_IER_MASK & ~MLX_V4_IER_DISINT);
        sc->mlx_state |= MLX_STATE_INTEN;
        break;
    }
}

/********************************************************************************
 * Poll for firmware error codes during controller initialisation.
 * Returns 0 if initialisation is complete, 1 if still in progress but no
 * error has been fetched, 2 if an error has been retrieved.
 */
static int
mlx_v4_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2)
{
    u_int8_t fwerror;
    static int initted = 0;

    debug_called(2);

    /* first time around, clear any hardware completion status */
    if (!initted) {
        MLX_V4_PUT_IDBR(sc, MLX_V4_IDB_SACK);
        DELAY(1000);
        initted = 1;
    }

    /* init in progress? */
    if (!(MLX_V4_GET_IDBR(sc) & MLX_V4_IDB_INIT_BUSY))
        return(0);

    /* test error value */
    fwerror = MLX_V4_GET_FWERROR(sc);
    if (!(fwerror & MLX_V4_FWERROR_PEND))
        return(1);

    /* mask status pending bit, fetch status */
    *error = fwerror & ~MLX_V4_FWERROR_PEND;
    *param1 = MLX_V4_GET_FWERROR_PARAM1(sc);
    *param2 = MLX_V4_GET_FWERROR_PARAM2(sc);

    /* acknowledge */
    MLX_V4_PUT_FWERROR(sc, 0);

    return(2);
}

/********************************************************************************
 ********************************************************************************
                                        Type 5 interface accessor methods
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Try to give (mc) to the controller.  Returns 1 if successful, 0 on failure
 * (the controller is not ready to take a command).
 *
 * Must be called at splbio or in a fashion that prevents reentry.
 */
static int
mlx_v5_tryqueue(struct mlx_softc *sc, struct mlx_command *mc)
{
    int i;

    debug_called(2);

    /* ready for our command? */
    if (MLX_V5_GET_IDBR(sc) & MLX_V5_IDB_EMPTY) {
        /* copy mailbox data to window */
        for (i = 0; i < 13; i++)
            MLX_V5_PUT_MAILBOX(sc, i, mc->mc_mailbox[i]);

        /* post command */
        MLX_V5_PUT_IDBR(sc, MLX_V5_IDB_HWMBOX_CMD);
        return(1);
    }
    return(0);
}

/********************************************************************************
 * See if a command has been completed, if so acknowledge its completion
 * and recover the slot number and status code.
 *
 * Must be called at splbio or in a fashion that prevents reentry.
 */
static int
mlx_v5_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status)
{

    debug_called(2);

    /* status available? */
    if (MLX_V5_GET_ODBR(sc) & MLX_V5_ODB_HWSAVAIL) {
        *slot = MLX_V5_GET_STATUS_IDENT(sc);		/* get command identifier */
        *status = MLX_V5_GET_STATUS(sc);		/* get status */

        /* acknowledge completion */
        MLX_V5_PUT_ODBR(sc, MLX_V5_ODB_HWMBOX_ACK);
        MLX_V5_PUT_IDBR(sc, MLX_V5_IDB_SACK);
        return(1);
    }
    return(0);
}

/********************************************************************************
 * Enable/disable interrupts as requested.
 *
 * Must be called at splbio or in a fashion that prevents reentry.
 */
static void
mlx_v5_intaction(struct mlx_softc *sc, int action)
{
    debug_called(1);

    switch(action) {
    case MLX_INTACTION_DISABLE:
        MLX_V5_PUT_IER(sc, 0xff & MLX_V5_IER_DISINT);
        sc->mlx_state &= ~MLX_STATE_INTEN;
        break;
    case MLX_INTACTION_ENABLE:
        MLX_V5_PUT_IER(sc, 0xff & ~MLX_V5_IER_DISINT);
        sc->mlx_state |= MLX_STATE_INTEN;
        break;
    }
}

/********************************************************************************
 * Poll for firmware error codes during controller initialisation.
 * Returns 0 if initialisation is complete, 1 if still in progress but no
 * error has been fetched, 2 if an error has been retrieved.
 */
static int
mlx_v5_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2)
{
    u_int8_t fwerror;
    static int initted = 0;

    debug_called(2);

    /* first time around, clear any hardware completion status */
    if (!initted) {
        MLX_V5_PUT_IDBR(sc, MLX_V5_IDB_SACK);
        DELAY(1000);
        initted = 1;
    }

    /* init in progress? */
    if (MLX_V5_GET_IDBR(sc) & MLX_V5_IDB_INIT_DONE)
        return(0);

    /* test for error value */
    fwerror = MLX_V5_GET_FWERROR(sc);
    if (!(fwerror & MLX_V5_FWERROR_PEND))
        return(1);

    /* mask status pending bit, fetch status */
    *error = fwerror & ~MLX_V5_FWERROR_PEND;
    *param1 = MLX_V5_GET_FWERROR_PARAM1(sc);
    *param2 = MLX_V5_GET_FWERROR_PARAM2(sc);

    /* acknowledge */
    MLX_V5_PUT_FWERROR(sc, 0xff);

    return(2);
}

/********************************************************************************
 ********************************************************************************
                                                Debugging
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Return a status message describing (mc)
 */
static char *mlx_status_messages[] = {
    "normal completion",			/* 00 */
    "irrecoverable data error",			/* 01 */
    "drive does not exist, or is offline",	/* 02 */
    "attempt to write beyond end of drive",	/* 03 */
    "bad data encountered",			/* 04 */
    "invalid log entry request",		/* 05 */
    "attempt to rebuild online drive",		/* 06 */
    "new disk failed during rebuild",		/* 07 */
    "invalid channel/target",			/* 08 */
    "rebuild/check already in progress",	/* 09 */
    "one or more disks are dead",		/* 10 */
    "invalid or non-redundant drive",		/* 11 */
    "channel is busy",				/* 12 */
    "channel is not stopped",			/* 13 */
    "rebuild successfully terminated",		/* 14 */
    "unsupported command",			/* 15 */
    "check condition received",			/* 16 */
    "device is busy",				/* 17 */
    "selection or command timeout",		/* 18 */
    "command terminated abnormally",		/* 19 */
    ""
};

static struct
{
    int command;
    u_int16_t status;
    int msg;
} mlx_messages[] = {
    {MLX_CMD_READSG,		0x0001,  1},
    {MLX_CMD_READSG,		0x0002,  1},
    {MLX_CMD_READSG,		0x0105,  3},
    {MLX_CMD_READSG,		0x010c,  4},
    {MLX_CMD_WRITESG,		0x0001,  1},
    {MLX_CMD_WRITESG,		0x0002,  1},
    {MLX_CMD_WRITESG,		0x0105,  3},
    {MLX_CMD_READSG_OLD,	0x0001,  1},
    {MLX_CMD_READSG_OLD,	0x0002,  1},
    {MLX_CMD_READSG_OLD,	0x0105,  3},
    {MLX_CMD_WRITESG_OLD,	0x0001,  1},
    {MLX_CMD_WRITESG_OLD,	0x0002,  1},
    {MLX_CMD_WRITESG_OLD,	0x0105,  3},
    {MLX_CMD_LOGOP,		0x0105,  5},
    {MLX_CMD_REBUILDASYNC,	0x0002,  6},
    {MLX_CMD_REBUILDASYNC,	0x0004,  7},
    {MLX_CMD_REBUILDASYNC,	0x0105,  8},
    {MLX_CMD_REBUILDASYNC,	0x0106,  9},
    {MLX_CMD_REBUILDASYNC,	0x0107, 14},
    {MLX_CMD_CHECKASYNC,	0x0002, 10},
    {MLX_CMD_CHECKASYNC,	0x0105, 11},
    {MLX_CMD_CHECKASYNC,	0x0106,  9},
    {MLX_CMD_STOPCHANNEL,	0x0106, 12},
    {MLX_CMD_STOPCHANNEL,	0x0105,  8},
    {MLX_CMD_STARTCHANNEL,	0x0005, 13},
    {MLX_CMD_STARTCHANNEL,	0x0105,  8},
    {MLX_CMD_DIRECT_CDB,	0x0002, 16},
    {MLX_CMD_DIRECT_CDB,	0x0008, 17},
    {MLX_CMD_DIRECT_CDB,	0x000e, 18},
    {MLX_CMD_DIRECT_CDB,	0x000f, 19},
    {MLX_CMD_DIRECT_CDB,	0x0105,  8},

    {0,				0x0104, 14},
    {-1, 0, 0}
};
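
/*
 * Note (added commentary): mlx_diagnose_command() below scans this table in
 * order; a .command value of 0 acts as a wildcard matching any opcode, the
 * .msg field indexes mlx_status_messages[], and the {-1, 0, 0} entry
 * terminates the search.
 */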

static char *
mlx_diagnose_command(struct mlx_command *mc)
{
    static char unkmsg[80];
    int i;

    /* look up message in table */
    for (i = 0; mlx_messages[i].command != -1; i++)
        if (((mc->mc_mailbox[0] == mlx_messages[i].command) || (mlx_messages[i].command == 0)) &&
            (mc->mc_status == mlx_messages[i].status))
            return(mlx_status_messages[mlx_messages[i].msg]);

    ksprintf(unkmsg, "unknown response 0x%x for command 0x%x", (int)mc->mc_status, (int)mc->mc_mailbox[0]);
    return(unkmsg);
}

/*******************************************************************************
 * Print a string describing the controller (sc)
 */
static struct
{
    int hwid;
    char *name;
} mlx_controller_names[] = {
    {0x01,	"960P/PD"},
    {0x02,	"960PL"},
    {0x10,	"960PG"},
    {0x11,	"960PJ"},
    {0x12,	"960PR"},
    {0x13,	"960PT"},
    {0x14,	"960PTL0"},
    {0x15,	"960PRL"},
    {0x16,	"960PTL1"},
    {0x20,	"1164PVX"},
    {-1, NULL}
};

static void
mlx_describe_controller(struct mlx_softc *sc)
{
    static char buf[80];
    char *model;
    int i;

    for (i = 0, model = NULL; mlx_controller_names[i].name != NULL; i++) {
        if ((sc->mlx_enq2->me_hardware_id & 0xff) == mlx_controller_names[i].hwid) {
            model = mlx_controller_names[i].name;
            break;
        }
    }
    if (model == NULL) {
        ksprintf(buf, " model 0x%x", sc->mlx_enq2->me_hardware_id & 0xff);
        model = buf;
    }
    device_printf(sc->mlx_dev, "DAC%s, %d channel%s, firmware %d.%02d-%c-%02d, %dMB RAM\n",
                  model,
                  sc->mlx_enq2->me_actual_channels,
                  sc->mlx_enq2->me_actual_channels > 1 ? "s" : "",
                  sc->mlx_enq2->me_firmware_id & 0xff,
                  (sc->mlx_enq2->me_firmware_id >> 8) & 0xff,
                  (sc->mlx_enq2->me_firmware_id >> 24) & 0xff,
                  (sc->mlx_enq2->me_firmware_id >> 16) & 0xff,
                  sc->mlx_enq2->me_mem_size / (1024 * 1024));

    if (bootverbose) {
        device_printf(sc->mlx_dev, " Hardware ID 0x%08x\n", sc->mlx_enq2->me_hardware_id);
        device_printf(sc->mlx_dev, " Firmware ID 0x%08x\n", sc->mlx_enq2->me_firmware_id);
        device_printf(sc->mlx_dev, " Configured/Actual channels %d/%d\n", sc->mlx_enq2->me_configured_channels,
                      sc->mlx_enq2->me_actual_channels);
        device_printf(sc->mlx_dev, " Max Targets %d\n", sc->mlx_enq2->me_max_targets);
        device_printf(sc->mlx_dev, " Max Tags %d\n", sc->mlx_enq2->me_max_tags);
        device_printf(sc->mlx_dev, " Max System Drives %d\n", sc->mlx_enq2->me_max_sys_drives);
        device_printf(sc->mlx_dev, " Max Arms %d\n", sc->mlx_enq2->me_max_arms);
        device_printf(sc->mlx_dev, " Max Spans %d\n", sc->mlx_enq2->me_max_spans);
        device_printf(sc->mlx_dev, " DRAM/cache/flash/NVRAM size %d/%d/%d/%d\n", sc->mlx_enq2->me_mem_size,
                      sc->mlx_enq2->me_cache_size, sc->mlx_enq2->me_flash_size, sc->mlx_enq2->me_nvram_size);
        device_printf(sc->mlx_dev, " DRAM type %d\n", sc->mlx_enq2->me_mem_type);
        device_printf(sc->mlx_dev, " Clock Speed %dns\n", sc->mlx_enq2->me_clock_speed);
        device_printf(sc->mlx_dev, " Hardware Speed %dns\n", sc->mlx_enq2->me_hardware_speed);
        device_printf(sc->mlx_dev, " Max Commands %d\n", sc->mlx_enq2->me_max_commands);
        device_printf(sc->mlx_dev, " Max SG Entries %d\n", sc->mlx_enq2->me_max_sg);
        device_printf(sc->mlx_dev, " Max DP %d\n", sc->mlx_enq2->me_max_dp);
        device_printf(sc->mlx_dev, " Max IOD %d\n", sc->mlx_enq2->me_max_iod);
        device_printf(sc->mlx_dev, " Max Comb %d\n", sc->mlx_enq2->me_max_comb);
        device_printf(sc->mlx_dev, " Latency %ds\n", sc->mlx_enq2->me_latency);
        device_printf(sc->mlx_dev, " SCSI Timeout %ds\n", sc->mlx_enq2->me_scsi_timeout);
        device_printf(sc->mlx_dev, " Min Free Lines %d\n", sc->mlx_enq2->me_min_freelines);
        device_printf(sc->mlx_dev, " Rate Constant %d\n", sc->mlx_enq2->me_rate_const);
        device_printf(sc->mlx_dev, " MAXBLK %d\n", sc->mlx_enq2->me_maxblk);
        device_printf(sc->mlx_dev, " Blocking Factor %d sectors\n", sc->mlx_enq2->me_blocking_factor);
        device_printf(sc->mlx_dev, " Cache Line Size %d blocks\n", sc->mlx_enq2->me_cacheline);
        device_printf(sc->mlx_dev, " SCSI Capability %s%dMHz, %d bit\n",
                      sc->mlx_enq2->me_scsi_cap & (1<<4) ? "differential " : "",
                      (1 << ((sc->mlx_enq2->me_scsi_cap >> 2) & 3)) * 10,
                      8 << (sc->mlx_enq2->me_scsi_cap & 0x3));
        device_printf(sc->mlx_dev, " Firmware Build Number %d\n", sc->mlx_enq2->me_firmware_build);
        device_printf(sc->mlx_dev, " Fault Management Type %d\n", sc->mlx_enq2->me_fault_mgmt_type);
        device_printf(sc->mlx_dev, " Features %pb%i\n",
                      "\20\4Background Init\3Read Ahead\2MORE\1Cluster\n",
                      sc->mlx_enq2->me_firmware_features);
    }
}

/*******************************************************************************
 * Emit a string describing the firmware handshake status code, and return a flag
 * indicating whether the code represents a fatal error.
 *
 * Error code interpretations are from the Linux driver, and don't directly match
 * the messages printed by Mylex's BIOS.  This may change if documentation on the
 * codes is forthcoming.
 */
static int
mlx_fw_message(struct mlx_softc *sc, int error, int param1, int param2)
{
    switch(error) {
    case 0x00:
        device_printf(sc->mlx_dev, "physical drive %d:%d not responding\n", param2, param1);
        break;
    case 0x08:
        /* we could be neater about this and give some indication when we receive more of them */
        if (!(sc->mlx_flags & MLX_SPINUP_REPORTED)) {
            device_printf(sc->mlx_dev, "spinning up drives...\n");
            sc->mlx_flags |= MLX_SPINUP_REPORTED;
        }
        break;
    case 0x30:
        device_printf(sc->mlx_dev, "configuration checksum error\n");
        break;
    case 0x60:
        device_printf(sc->mlx_dev, "mirror race recovery failed\n");
        break;
    case 0x70:
        device_printf(sc->mlx_dev, "mirror race recovery in progress\n");
        break;
    case 0x90:
        device_printf(sc->mlx_dev, "physical drive %d:%d COD mismatch\n", param2, param1);
        break;
    case 0xa0:
        device_printf(sc->mlx_dev, "logical drive installation aborted\n");
        break;
    case 0xb0:
        device_printf(sc->mlx_dev, "mirror race on a critical system drive\n");
        break;
    case 0xd0:
        device_printf(sc->mlx_dev, "new controller configuration found\n");
        break;
    case 0xf0:
        device_printf(sc->mlx_dev, "FATAL MEMORY PARITY ERROR\n");
        return(1);
    default:
        device_printf(sc->mlx_dev, "unknown firmware initialisation error %02x:%02x:%02x\n", error, param1, param2);
        break;
    }
    return(0);
}

/********************************************************************************
 ********************************************************************************
                                                Utility Functions
 ********************************************************************************
 ********************************************************************************/

/********************************************************************************
 * Find the disk whose unit number is (unit) on this controller
 */
static struct mlx_sysdrive *
mlx_findunit(struct mlx_softc *sc, int unit)
{
    int i;

    /* search system drives */
    for (i = 0; i < MLX_MAXDRIVES; i++) {
        /* is this one attached? */
        if (sc->mlx_sysdrive[i].ms_disk != NULL) {
            /* is this the one? */
            if (unit == device_get_unit(sc->mlx_sysdrive[i].ms_disk))
                return(&sc->mlx_sysdrive[i]);
        }
    }
    return(NULL);
}