1 /*	$NetBSD: xy.c,v 1.39 2002/01/14 13:32:48 tsutsui Exp $	*/
2 
3 /*
4  *
5  * Copyright (c) 1995 Charles D. Cranor
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. All advertising materials mentioning features or use of this software
17  *    must display the following acknowledgement:
18  *      This product includes software developed by Charles D. Cranor.
19  * 4. The name of the author may not be used to endorse or promote products
20  *    derived from this software without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 /*
35  *
36  * x y . c   x y l o g i c s   4 5 0 / 4 5 1   s m d   d r i v e r
37  *
38  * author: Chuck Cranor <chuck@ccrc.wustl.edu>
39  * started: 14-Sep-95
40  * references: [1] Xylogics Model 753 User's Manual
41  *                 part number: 166-753-001, Revision B, May 21, 1988.
42  *                 "Your Partner For Performance"
43  *             [2] other NetBSD disk device drivers
44  *	       [3] Xylogics Model 450 User's Manual
45  *		   part number: 166-017-001, Revision B, 1983.
46  *	       [4] Addendum to Xylogics Model 450 Disk Controller User's
47  *			Manual, Jan. 1985.
48  *	       [5] The 451 Controller, Rev. B3, September 2, 1986.
49  *	       [6] David Jones <dej@achilles.net>'s unfinished 450/451 driver
50  *
51  */
52 
53 #include <sys/cdefs.h>
54 __KERNEL_RCSID(0, "$NetBSD: xy.c,v 1.39 2002/01/14 13:32:48 tsutsui Exp $");
55 
56 #undef XYC_DEBUG		/* full debug */
57 #undef XYC_DIAG			/* extra sanity checks */
58 #if defined(DIAGNOSTIC) && !defined(XYC_DIAG)
59 #define XYC_DIAG		/* link in with master DIAG option */
60 #endif
61 
62 #include <sys/param.h>
63 #include <sys/proc.h>
64 #include <sys/systm.h>
65 #include <sys/kernel.h>
66 #include <sys/file.h>
67 #include <sys/stat.h>
68 #include <sys/ioctl.h>
69 #include <sys/buf.h>
70 #include <sys/uio.h>
71 #include <sys/malloc.h>
72 #include <sys/device.h>
73 #include <sys/disklabel.h>
74 #include <sys/disk.h>
75 #include <sys/syslog.h>
76 #include <sys/dkbad.h>
77 #include <sys/conf.h>
78 
79 #include <machine/bus.h>
80 #include <machine/intr.h>
81 
82 #if defined(__sparc__) || defined(sun3)
83 #include <dev/sun/disklabel.h>
84 #endif
85 
86 #include <dev/vme/vmereg.h>
87 #include <dev/vme/vmevar.h>
88 
89 #include <dev/vme/xyreg.h>
90 #include <dev/vme/xyvar.h>
91 #include <dev/vme/xio.h>
92 
93 #include "locators.h"
94 
95 /*
96  * macros
97  */
98 
99 /*
100  * XYC_GO: start iopb ADDR (DVMA addr in a u_long) on XYC
101  */
102 #define XYC_GO(XYC, ADDR) { \
103 	(XYC)->xyc_addr_lo = ((ADDR) & 0xff); \
104 	(ADDR) = ((ADDR) >> 8); \
105 	(XYC)->xyc_addr_hi = ((ADDR) & 0xff); \
106 	(ADDR) = ((ADDR) >> 8); \
107 	(XYC)->xyc_reloc_lo = ((ADDR) & 0xff); \
108 	(ADDR) = ((ADDR) >> 8); \
109 	(XYC)->xyc_reloc_hi = (ADDR); \
110 	(XYC)->xyc_csr = XYC_GBSY; /* go! */ \
111 }
112 
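/*
 * illustrative note (not part of the original source): XYC_GO feeds the
 * DVMA address of an iopb to the controller one byte at a time through
 * the four byte-wide address/relocation registers and then sets the
 * "go/busy" bit.  for example, with an iopb at DVMA address 0x00123456:
 *
 *	u_long addr = 0x00123456;
 *	XYC_GO(xycsc->xyc, addr);	(xycsc being some struct xyc_softc *)
 *
 * loads xyc_addr_lo = 0x56, xyc_addr_hi = 0x34, xyc_reloc_lo = 0x12,
 * xyc_reloc_hi = 0x00, then writes XYC_GBSY to the csr.  note that the
 * macro shifts ADDR in place, so the value passed in is consumed.
 */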
113 /*
114  * XYC_DONE: don't need IORQ, get error code and free (done after xyc_cmd)
115  */
116 
117 #define XYC_DONE(SC,ER) { \
118 	if ((ER) == XY_ERR_AOK) { \
119 		(ER) = (SC)->ciorq->errno; \
120 		(SC)->ciorq->mode = XY_SUB_FREE; \
121 		wakeup((SC)->ciorq); \
122 	} \
123 	}
124 
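/*
 * illustrative usage (not part of the original source): for POLL/WAIT
 * commands the convention in this driver is to pull any result fields out
 * of the control iopb before XYC_DONE, because XYC_DONE marks the control
 * iorq free and wakes anyone sleeping on it:
 *
 *	error = xyc_cmd(xycsc, XYCMD_ST, 0, 0, 0, 0, 0, XY_SUB_POLL);
 *	ctype = xycsc->ciopb->ctyp;	(read results first)
 *	XYC_DONE(xycsc, error);		(then collect errno, free ciorq)
 *
 * (xycsc and ctype are stand-in names; xycattach below does exactly this.)
 */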
125 /*
126  * XYC_ADVANCE: advance iorq's pointers by a number of sectors
127  */
128 
129 #define XYC_ADVANCE(IORQ, N) { \
130 	if (N) { \
131 		(IORQ)->sectcnt -= (N); \
132 		(IORQ)->blockno += (N); \
133 		(IORQ)->dbuf += ((N)*XYFM_BPS); \
134 	} \
135 }
136 
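/*
 * illustrative arithmetic (not part of the original source): after a
 * partial transfer of, say, 2 sectors, XYC_ADVANCE(iorq, 2) drops
 * iorq->sectcnt by 2, bumps iorq->blockno by 2 and moves iorq->dbuf
 * forward by 2 * XYFM_BPS bytes, i.e. two formatted sectors' worth of
 * data.
 */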
137 /*
138  * note - addresses you can sleep on:
139  *   [1] & of xy_softc's "state" (waiting for a chance to attach a drive)
140  *   [2] & an iorq (waiting for an XY_SUB_WAIT iorq to finish)
141  */
142 
143 
144 /*
145  * function prototypes
146  * "xyc_*" functions are internal, all others are external interfaces
147  */
148 
149 extern int pil_to_vme[];	/* from obio.c */
150 
151 /* internals */
152 struct xy_iopb *xyc_chain __P((struct xyc_softc *, struct xy_iorq *));
153 int	xyc_cmd __P((struct xyc_softc *, int, int, int, int, int, char *, int));
154 char   *xyc_e2str __P((int));
155 int	xyc_entoact __P((int));
156 int	xyc_error __P((struct xyc_softc *, struct xy_iorq *,
157 		   struct xy_iopb *, int));
158 int	xyc_ioctlcmd __P((struct xy_softc *, dev_t dev, struct xd_iocmd *));
159 void	xyc_perror __P((struct xy_iorq *, struct xy_iopb *, int));
160 int	xyc_piodriver __P((struct xyc_softc *, struct xy_iorq *));
161 int	xyc_remove_iorq __P((struct xyc_softc *));
162 int	xyc_reset __P((struct xyc_softc *, int, struct xy_iorq *, int,
163 			struct xy_softc *));
164 inline void xyc_rqinit __P((struct xy_iorq *, struct xyc_softc *,
165 			    struct xy_softc *, int, u_long, int,
166 			    caddr_t, struct buf *));
167 void	xyc_rqtopb __P((struct xy_iorq *, struct xy_iopb *, int, int));
168 void	xyc_start __P((struct xyc_softc *, struct xy_iorq *));
169 int	xyc_startbuf __P((struct xyc_softc *, struct xy_softc *, struct buf *));
170 int	xyc_submit_iorq __P((struct xyc_softc *, struct xy_iorq *, int));
171 void	xyc_tick __P((void *));
172 int	xyc_unbusy __P((struct xyc *, int));
173 void	xyc_xyreset __P((struct xyc_softc *, struct xy_softc *));
174 int	xy_dmamem_alloc(bus_dma_tag_t, bus_dmamap_t, bus_dma_segment_t *,
175 			int *, bus_size_t, caddr_t *, bus_addr_t *);
176 void	xy_dmamem_free(bus_dma_tag_t, bus_dmamap_t, bus_dma_segment_t *,
177 			int, bus_size_t, caddr_t);
178 
179 /* machine interrupt hook */
180 int	xycintr __P((void *));
181 
182 /* autoconf */
183 int	xycmatch __P((struct device *, struct cfdata *, void *));
184 void	xycattach __P((struct device *, struct device *, void *));
185 int	xymatch __P((struct device *, struct cfdata *, void *));
186 void	xyattach __P((struct device *, struct device *, void *));
187 static	int xyc_probe __P((void *, bus_space_tag_t, bus_space_handle_t));
188 
189 static	void xydummystrat __P((struct buf *));
190 int	xygetdisklabel __P((struct xy_softc *, void *));
191 
192 bdev_decl(xy);
193 cdev_decl(xy);
194 
195 /*
196  * cfattach's: device driver interface to autoconfig
197  */
198 
199 struct cfattach xyc_ca = {
200 	sizeof(struct xyc_softc), xycmatch, xycattach
201 };
202 
203 struct cfattach xy_ca = {
204 	sizeof(struct xy_softc), xymatch, xyattach
205 };
206 
207 extern struct cfdriver xy_cd;
208 
209 struct xyc_attach_args {	/* this is the "aux" args to xyattach */
210 	int	driveno;	/* unit number */
211 	int	fullmode;	/* submit mode */
212 	int	booting;	/* are we booting or not? */
213 };
214 
215 /*
216  * dkdriver
217  */
218 
219 struct dkdriver xydkdriver = { xystrategy };
220 
221 /*
222  * start: disk label fix code (XXX)
223  */
224 
225 static void *xy_labeldata;
226 
227 static void
228 xydummystrat(bp)
229 	struct buf *bp;
230 {
231 	if (bp->b_bcount != XYFM_BPS)
232 		panic("xydummystrat");
233 	bcopy(xy_labeldata, bp->b_data, XYFM_BPS);
234 	bp->b_flags |= B_DONE;
235 	bp->b_flags &= ~B_BUSY;
236 }
237 
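/*
 * illustrative call flow (not part of the original source): xyattach has
 * already read sector 0 into a DMA buffer with a polled XYCMD_RD, so
 * xygetdisklabel points readdisklabel() at this dummy strategy routine,
 * which "reads" by copying the saved sector out of xy_labeldata instead
 * of touching the hardware:
 *
 *	xyattach:  xyc_cmd(..., XYCMD_RD, ...)	(buf now holds sector 0)
 *	xygetdisklabel(xy, buf):
 *		xy_labeldata = buf;
 *		readdisklabel(..., xydummystrat, ...)
 *			-> xydummystrat(bp): bcopy(xy_labeldata, bp->b_data, ...)
 */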
238 int
239 xygetdisklabel(xy, b)
240 	struct xy_softc *xy;
241 	void *b;
242 {
243 	char *err;
244 #if defined(__sparc__) || defined(sun3)
245 	struct sun_disklabel *sdl;
246 #endif
247 
248 	/* We already have the label data in `b'; setup for dummy strategy */
249 	xy_labeldata = b;
250 
251 	/* Required parameter for readdisklabel() */
252 	xy->sc_dk.dk_label->d_secsize = XYFM_BPS;
253 
254 	err = readdisklabel(MAKEDISKDEV(0, xy->sc_dev.dv_unit, RAW_PART),
255 					xydummystrat,
256 				xy->sc_dk.dk_label, xy->sc_dk.dk_cpulabel);
257 	if (err) {
258 		printf("%s: %s\n", xy->sc_dev.dv_xname, err);
259 		return(XY_ERR_FAIL);
260 	}
261 
262 #if defined(__sparc__) || defined(sun3)
263 	/* Ok, we have the label; fill in `pcyl' if there's SunOS magic */
264 	sdl = (struct sun_disklabel *)xy->sc_dk.dk_cpulabel->cd_block;
265 	if (sdl->sl_magic == SUN_DKMAGIC) {
266 		xy->pcyl = sdl->sl_pcylinders;
267 	} else
268 #endif
269 	{
270 		printf("%s: WARNING: no `pcyl' in disk label.\n",
271 			xy->sc_dev.dv_xname);
272 		xy->pcyl = xy->sc_dk.dk_label->d_ncylinders +
273 			xy->sc_dk.dk_label->d_acylinders;
274 		printf("%s: WARNING: guessing pcyl=%d (ncyl+acyl)\n",
275 		xy->sc_dev.dv_xname, xy->pcyl);
276 	}
277 
278 	xy->ncyl = xy->sc_dk.dk_label->d_ncylinders;
279 	xy->acyl = xy->sc_dk.dk_label->d_acylinders;
280 	xy->nhead = xy->sc_dk.dk_label->d_ntracks;
281 	xy->nsect = xy->sc_dk.dk_label->d_nsectors;
282 	xy->sectpercyl = xy->nhead * xy->nsect;
283 	xy->sc_dk.dk_label->d_secsize = XYFM_BPS; /* not handled by
284                                           	  * sun->bsd */
285 	return(XY_ERR_AOK);
286 }
287 
288 /*
289  * end: disk label fix code (XXX)
290  */
291 
292 /*
293  * Shorthand for allocating, mapping and loading a DMA buffer
294  */
295 int
296 xy_dmamem_alloc(tag, map, seg, nsegp, len, kvap, dmap)
297 	bus_dma_tag_t		tag;
298 	bus_dmamap_t		map;
299 	bus_dma_segment_t	*seg;
300 	int			*nsegp;
301 	bus_size_t		len;
302 	caddr_t			*kvap;
303 	bus_addr_t		*dmap;
304 {
305 	int nseg;
306 	int error;
307 
308 	if ((error = bus_dmamem_alloc(tag, len, 0, 0,
309 				      seg, 1, &nseg, BUS_DMA_NOWAIT)) != 0) {
310 		return (error);
311 	}
312 
313 	if ((error = bus_dmamem_map(tag, seg, nseg,
314 				    len, kvap,
315 				    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
316 		bus_dmamem_free(tag, seg, nseg);
317 		return (error);
318 	}
319 
320 	if ((error = bus_dmamap_load(tag, map, *kvap, len, NULL,
321 				     BUS_DMA_NOWAIT)) != 0) {
322 		bus_dmamem_unmap(tag, *kvap, len);
323 		bus_dmamem_free(tag, seg, nseg);
324 		return (error);
325 	}
326 
327 	*dmap = map->dm_segs[0].ds_addr;
328 	*nsegp = nseg;
329 	return (0);
330 }
331 
332 void
333 xy_dmamem_free(tag, map, seg, nseg, len, kva)
334 	bus_dma_tag_t		tag;
335 	bus_dmamap_t		map;
336 	bus_dma_segment_t	*seg;
337 	int			nseg;
338 	bus_size_t		len;
339 	caddr_t			kva;
340 {
341 
342 	bus_dmamap_unload(tag, map);
343 	bus_dmamem_unmap(tag, kva, len);
344 	bus_dmamem_free(tag, seg, nseg);
345 }
346 
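/*
 * illustrative usage sketch (not part of the original source): the DMA map
 * itself is created separately with vme_dmamap_create(); xy_dmamem_alloc
 * then allocates, maps and loads the backing memory.  the attach code
 * below uses the pair roughly like this:
 *
 *	bus_dma_segment_t seg;
 *	int rseg, error;
 *	caddr_t kva;
 *	bus_addr_t dva;
 *
 *	error = xy_dmamem_alloc(xyc->dmatag, xyc->auxmap, &seg, &rseg,
 *				XYFM_BPS, &kva, &dva);
 *	... the CPU touches kva, the controller is handed dva ...
 *	xy_dmamem_free(xyc->dmatag, xyc->auxmap, &seg, rseg, XYFM_BPS, kva);
 */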
347 
348 /*
349  * a u t o c o n f i g   f u n c t i o n s
350  */
351 
352 /*
353  * xycmatch: determine if xyc is present or not.   we do a
354  * soft reset to detect the xyc.
355  */
356 int
357 xyc_probe(arg, tag, handle)
358 	void *arg;
359 	bus_space_tag_t tag;
360 	bus_space_handle_t handle;
361 {
362 	struct xyc *xyc = (void *)handle; /* XXX */
363 
364 	return ((xyc_unbusy(xyc, XYC_RESETUSEC) != XY_ERR_FAIL) ? 0 : EIO);
365 }
366 
367 int xycmatch(parent, cf, aux)
368 	struct device *parent;
369 	struct cfdata *cf;
370 	void *aux;
371 {
372 	struct vme_attach_args	*va = aux;
373 	vme_chipset_tag_t	ct = va->va_vct;
374 	vme_am_t		mod;
375 	int error;
376 
377 	mod = VME_AM_A16 | VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA;
378 	if (vme_space_alloc(ct, va->r[0].offset, sizeof(struct xyc), mod))
379 		return (0);
380 
381 	error = vme_probe(ct, va->r[0].offset, sizeof(struct xyc),
382 			  mod, VME_D16, xyc_probe, 0);
383 	vme_space_free(va->va_vct, va->r[0].offset, sizeof(struct xyc), mod);
384 
385 	return (error == 0);
386 }
387 
388 /*
389  * xycattach: attach controller
390  */
391 void
392 xycattach(parent, self, aux)
393 	struct device *parent, *self;
394 	void   *aux;
395 
396 {
397 	struct xyc_softc	*xyc = (void *) self;
398 	struct vme_attach_args	*va = aux;
399 	vme_chipset_tag_t	ct = va->va_vct;
400 	bus_space_tag_t		bt;
401 	bus_space_handle_t	bh;
402 	vme_intr_handle_t	ih;
403 	vme_am_t		mod;
404 	struct xyc_attach_args	xa;
405 	int			lcv, res, error;
406 	bus_dma_segment_t	seg;
407 	int			rseg;
408 	vme_mapresc_t resc;
409 
410 	/* get addressing and intr level stuff from autoconfig and load it
411 	 * into our xyc_softc. */
412 
413 	mod = VME_AM_A16 | VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA;
414 
415 	if (vme_space_alloc(ct, va->r[0].offset, sizeof(struct xyc), mod))
416 		panic("xyc: vme alloc");
417 
418 	if (vme_space_map(ct, va->r[0].offset, sizeof(struct xyc),
419 			  mod, VME_D16, 0, &bt, &bh, &resc) != 0)
420 		panic("xyc: vme_map");
421 
422 	xyc->xyc = (struct xyc *) bh; /* XXX */
423 	xyc->ipl = va->ilevel;
424 	xyc->vector = va->ivector;
425 	xyc->no_ols = 0; /* XXX should be from config */
426 
427 	for (lcv = 0; lcv < XYC_MAXDEV; lcv++)
428 		xyc->sc_drives[lcv] = (struct xy_softc *) 0;
429 
430 	/*
431 	 * allocate and zero buffers
432 	 * check boundaries of the KVA's ... all IOPBs must reside in
433  	 * the same 64K region.
434 	 */
435 
436 	/* Get DMA handle for misc. transfers */
437 	if ((error = vme_dmamap_create(
438 				ct,		/* VME chip tag */
439 				MAXPHYS,	/* size */
440 				VME_AM_A24,	/* address modifier */
441 				VME_D16,	/* data size */
442 				0,		/* swap */
443 				1,		/* nsegments */
444 				MAXPHYS,	/* maxsegsz */
445 				0,		/* boundary */
446 				BUS_DMA_NOWAIT,
447 				&xyc->auxmap)) != 0) {
448 
449 		printf("%s: DMA buffer map create error %d\n",
450 			xyc->sc_dev.dv_xname, error);
451 		return;
452 	}
453 
454 	/* Get DMA handle for mapping iorq descriptors */
455 	if ((error = vme_dmamap_create(
456 				ct,		/* VME chip tag */
457 				XYC_MAXIOPB * sizeof(struct xy_iopb),
458 				VME_AM_A24,	/* address modifier */
459 				VME_D16,	/* data size */
460 				0,		/* swap */
461 				1,		/* nsegments */
462 				XYC_MAXIOPB * sizeof(struct xy_iopb),
463 				64*1024,	/* boundary */
464 				BUS_DMA_NOWAIT,
465 				&xyc->iopmap)) != 0) {
466 
467 		printf("%s: DMA buffer map create error %d\n",
468 			xyc->sc_dev.dv_xname, error);
469 		return;
470 	}
471 
472 	/* Get DMA buffer for iorq descriptors */
473 	if ((error = xy_dmamem_alloc(xyc->dmatag, xyc->iopmap, &seg, &rseg,
474 				     XYC_MAXIOPB * sizeof(struct xy_iopb),
475 				     (caddr_t *)&xyc->iopbase,
476 				     (bus_addr_t *)&xyc->dvmaiopb)) != 0) {
477 		printf("%s: DMA buffer alloc error %d\n",
478 			xyc->sc_dev.dv_xname, error);
479 		return;
480 	}
481 
482 	bzero(xyc->iopbase, XYC_MAXIOPB * sizeof(struct xy_iopb));
483 
484 	xyc->reqs = (struct xy_iorq *)
485 	    malloc(XYC_MAXIOPB * sizeof(struct xy_iorq),
486 	    M_DEVBUF, M_NOWAIT|M_ZERO);
487 	if (xyc->reqs == NULL)
488 		panic("xyc malloc");
489 
490 	/*
491 	 * init iorq to iopb pointers, and non-zero fields in the
492 	 * iopb which never change.
493 	 */
494 
495 	for (lcv = 0; lcv < XYC_MAXIOPB; lcv++) {
496 		xyc->xy_chain[lcv] = NULL;
497 		xyc->reqs[lcv].iopb = &xyc->iopbase[lcv];
498 		xyc->reqs[lcv].dmaiopb = &xyc->dvmaiopb[lcv];
499 		xyc->iopbase[lcv].asr = 1;	/* always the same */
500 		xyc->iopbase[lcv].eef = 1;	/* always the same */
501 		xyc->iopbase[lcv].ecm = XY_ECM;	/* always the same */
502 		xyc->iopbase[lcv].aud = 1;	/* always the same */
503 		xyc->iopbase[lcv].relo = 1;	/* always the same */
504 		xyc->iopbase[lcv].thro = XY_THRO;/* always the same */
505 
506 		if ((error = vme_dmamap_create(
507 				ct,		/* VME chip tag */
508 				MAXPHYS,	/* size */
509 				VME_AM_A24,	/* address modifier */
510 				VME_D16,	/* data size */
511 				0,		/* swap */
512 				1,		/* nsegments */
513 				MAXPHYS,	/* maxsegsz */
514 				0,		/* boundary */
515 				BUS_DMA_NOWAIT,
516 				&xyc->reqs[lcv].dmamap)) != 0) {
517 
518 			printf("%s: DMA buffer map create error %d\n",
519 				xyc->sc_dev.dv_xname, error);
520 			return;
521 		}
522 	}
523 	xyc->ciorq = &xyc->reqs[XYC_CTLIOPB];    /* shorthand name */
524 	xyc->ciopb = &xyc->iopbase[XYC_CTLIOPB]; /* shorthand name */
525 	xyc->xy_hand = 0;
526 
527 	/* read controller parameters and ensure we have a 450/451 */
528 
529 	error = xyc_cmd(xyc, XYCMD_ST, 0, 0, 0, 0, 0, XY_SUB_POLL);
530 	res = xyc->ciopb->ctyp;
531 	XYC_DONE(xyc, error);
532 	if (res != XYCT_450) {
533 		if (error)
534 			printf(": %s: ", xyc_e2str(error));
535 		printf(": doesn't identify as a 450/451\n");
536 		return;
537 	}
538 	printf(": Xylogics 450/451");
539 	if (xyc->no_ols)
540 		printf(" [OLS disabled]"); /* 450 doesn't overlap seek right */
541 	printf("\n");
542 	if (error) {
543 		printf("%s: error: %s\n", xyc->sc_dev.dv_xname,
544 				xyc_e2str(error));
545 		return;
546 	}
547 	if ((xyc->xyc->xyc_csr & XYC_ADRM) == 0) {
548 		printf("%s: 24 bit addressing turned off\n",
549 			xyc->sc_dev.dv_xname);
550 		printf("please set hardware jumpers JM1-JM2=in, JM3-JM4=out\n");
551 		printf("to enable 24 bit mode and this driver\n");
552 		return;
553 	}
554 
555 	/* link in interrupt with higher level software */
556 	vme_intr_map(ct, va->ilevel, va->ivector, &ih);
557 	vme_intr_establish(ct, ih, IPL_BIO, xycintr, xyc);
558 	evcnt_attach_dynamic(&xyc->sc_intrcnt, EVCNT_TYPE_INTR, NULL,
559 	    xyc->sc_dev.dv_xname, "intr");
560 
561 	callout_init(&xyc->sc_tick_ch);
562 
563 	/* now we must look for disks using autoconfig */
564 	xa.fullmode = XY_SUB_POLL;
565 	xa.booting = 1;
566 
567 	for (xa.driveno = 0; xa.driveno < XYC_MAXDEV; xa.driveno++)
568 		(void) config_found(self, (void *) &xa, NULL);
569 
570 	/* start the watchdog clock */
571 	callout_reset(&xyc->sc_tick_ch, XYC_TICKCNT, xyc_tick, xyc);
572 
573 }
574 
575 /*
576  * xymatch: probe for disk.
577  *
578  * note: we almost always say disk is present.   this allows us to
579  * spin up and configure a disk after the system is booted (we can
580  * call xyattach!).
581  */
582 int
583 xymatch(parent, cf, aux)
584 	struct device *parent;
585 	struct cfdata *cf;
586 	void *aux;
587 {
588 	struct xyc_attach_args *xa = aux;
589 
590 	/* looking for autoconf wildcard or exact match */
591 
592 	if (cf->cf_loc[XYCCF_DRIVE] != XYCCF_DRIVE_DEFAULT &&
593 	    cf->cf_loc[XYCCF_DRIVE] != xa->driveno)
594 		return 0;
595 
596 	return 1;
597 
598 }
599 
600 /*
601  * xyattach: attach a disk.   this can be called from autoconf and also
602  * from xyopen/xystrategy.
603  */
604 void
605 xyattach(parent, self, aux)
606 	struct device *parent, *self;
607 	void   *aux;
608 
609 {
610 	struct xy_softc *xy = (void *) self, *oxy;
611 	struct xyc_softc *xyc = (void *) parent;
612 	struct xyc_attach_args *xa = aux;
613 	int     spt, mb, blk, lcv, fmode, s = 0, newstate;
614 	struct dkbad *dkb;
615 	int			rseg, error;
616 	bus_dma_segment_t	seg;
617 	caddr_t			dmaddr;
618 	caddr_t			buf;
619 
620 	/*
621 	 * Always re-initialize the disk structure.  We want statistics
622 	 * to start with a clean slate.
623 	 */
624 	bzero(&xy->sc_dk, sizeof(xy->sc_dk));
625 	xy->sc_dk.dk_driver = &xydkdriver;
626 	xy->sc_dk.dk_name = xy->sc_dev.dv_xname;
627 
628 	/* if booting, init the xy_softc */
629 
630 	if (xa->booting) {
631 		xy->state = XY_DRIVE_UNKNOWN;	/* to start */
632 		xy->flags = 0;
633 		xy->parent = xyc;
634 
635 		/* init queue of waiting bufs */
636 
637 		BUFQ_INIT(&xy->xyq);
638 
639 		xy->xyrq = &xyc->reqs[xa->driveno];
640 
641 	}
642 	xy->xy_drive = xa->driveno;
643 	fmode = xa->fullmode;
644 	xyc->sc_drives[xa->driveno] = xy;
645 
646 	/* if not booting, make sure we are the only process in the attach for
647 	 * this drive.   if locked out, sleep on it. */
648 
649 	if (!xa->booting) {
650 		s = splbio();
651 		while (xy->state == XY_DRIVE_ATTACHING) {
652 			if (tsleep(&xy->state, PRIBIO, "xyattach", 0)) {
653 				splx(s);
654 				return;
655 			}
656 		}
657 		printf("%s at %s",
658 			xy->sc_dev.dv_xname, xy->parent->sc_dev.dv_xname);
659 	}
660 
661 	/* we now have control */
662 	xy->state = XY_DRIVE_ATTACHING;
663 	newstate = XY_DRIVE_UNKNOWN;
664 
665 	buf = NULL;
666 	if ((error = xy_dmamem_alloc(xyc->dmatag, xyc->auxmap, &seg, &rseg,
667 				     XYFM_BPS,
668 				     (caddr_t *)&buf,
669 				     (bus_addr_t *)&dmaddr)) != 0) {
670 		printf("%s: DMA buffer alloc error %d\n",
671 			xyc->sc_dev.dv_xname, error);
672 		return;
673 	}
674 
675 	/* first try and reset the drive */
676 	error = xyc_cmd(xyc, XYCMD_RST, 0, xy->xy_drive, 0, 0, 0, fmode);
677 	XYC_DONE(xyc, error);
678 	if (error == XY_ERR_DNRY) {
679 		printf(" drive %d: off-line\n", xa->driveno);
680 		goto done;
681 	}
682 	if (error) {
683 		printf(": ERROR 0x%02x (%s)\n", error, xyc_e2str(error));
684 		goto done;
685 	}
686 	printf(" drive %d: ready", xa->driveno);
687 
688 	/*
689 	 * now set drive parameters (to semi-bogus values) so we can read the
690 	 * disk label.
691 	 */
692 	xy->pcyl = xy->ncyl = 1;
693 	xy->acyl = 0;
694 	xy->nhead = 1;
695 	xy->nsect = 1;
696 	xy->sectpercyl = 1;
697 	for (lcv = 0; lcv < 126; lcv++)	/* init empty bad144 table */
698 		xy->dkb.bt_bad[lcv].bt_cyl =
699 			xy->dkb.bt_bad[lcv].bt_trksec = 0xffff;
700 
701 	/* read disk label */
702 	for (xy->drive_type = 0 ; xy->drive_type <= XYC_MAXDT ;
703 						xy->drive_type++) {
704 		error = xyc_cmd(xyc, XYCMD_RD, 0, xy->xy_drive, 0, 1,
705 						dmaddr, fmode);
706 		XYC_DONE(xyc, error);
707 		if (error == XY_ERR_AOK) break;
708 	}
709 
710 	if (error != XY_ERR_AOK) {
711 		printf("\n%s: reading disk label failed: %s\n",
712 			xy->sc_dev.dv_xname, xyc_e2str(error));
713 		goto done;
714 	}
715 	printf(" (drive type %d)\n", xy->drive_type);
716 
717 	newstate = XY_DRIVE_NOLABEL;
718 
719 	xy->hw_spt = spt = 0; /* XXX needed ? */
720 	/* Attach the disk: must be before getdisklabel to malloc label */
721 	disk_attach(&xy->sc_dk);
722 
723 	if (xygetdisklabel(xy, buf) != XY_ERR_AOK)
724 		goto done;
725 
726 	/* inform the user of what is up */
727 	printf("%s: <%s>, pcyl %d\n", xy->sc_dev.dv_xname,
728 		buf, xy->pcyl);
729 	mb = xy->ncyl * (xy->nhead * xy->nsect) / (1048576 / XYFM_BPS);
730 	printf("%s: %dMB, %d cyl, %d head, %d sec, %d bytes/sec\n",
731 		xy->sc_dev.dv_xname, mb, xy->ncyl, xy->nhead, xy->nsect,
732 		XYFM_BPS);
733 
734 	/*
735 	 * 450/451 stupidity: the drive type is encoded into the format
736 	 * of the disk.   the drive type in the IOPB must match the drive
737 	 * type in the format, or you will not be able to do I/O to the
738 	 * disk (you get header not found errors).  if you have two drives
739 	 * of different sizes that have the same drive type in their
740 	 * formatting then you are out of luck.
741 	 *
742 	 * this problem was corrected in the 753/7053.
743 	 */
744 
745 	for (lcv = 0 ; lcv < XYC_MAXDEV ; lcv++) {
746 		oxy = xyc->sc_drives[lcv];
747 		if (oxy == NULL || oxy == xy) continue;
748 		if (oxy->drive_type != xy->drive_type) continue;
749 		if (xy->nsect != oxy->nsect || xy->pcyl != oxy->pcyl ||
750 			xy->nhead != oxy->nhead) {
751 			printf("%s: %s and %s must be the same size!\n",
752 				xyc->sc_dev.dv_xname, xy->sc_dev.dv_xname,
753 				oxy->sc_dev.dv_xname);
754 			panic("xy drive size mismatch");
755 		}
756 	}
757 
758 
759 	/* now set the real drive parameters! */
760 
761 	blk = (xy->nsect - 1) +
762 		((xy->nhead - 1) * xy->nsect) +
763 		((xy->pcyl - 1) * xy->nsect * xy->nhead);
764 	error = xyc_cmd(xyc, XYCMD_SDS, 0, xy->xy_drive, blk, 0, 0, fmode);
765 	XYC_DONE(xyc, error);
766 	if (error) {
767 		printf("%s: write drive size failed: %s\n",
768 			xy->sc_dev.dv_xname, xyc_e2str(error));
769 		goto done;
770 	}
771 	newstate = XY_DRIVE_ONLINE;
772 
773 	/*
774 	 * read bad144 table. this table resides on the first sector of the
775 	 * last track of the disk (i.e. second cyl of "acyl" area).
776 	 */
777 
778 	blk = (xy->ncyl + xy->acyl - 1) * (xy->nhead * xy->nsect) +
779 								/* last cyl */
780 	    (xy->nhead - 1) * xy->nsect;	/* last head */
781 	error = xyc_cmd(xyc, XYCMD_RD, 0, xy->xy_drive, blk, 1,
782 						dmaddr, fmode);
783 	XYC_DONE(xyc, error);
784 	if (error) {
785 		printf("%s: reading bad144 failed: %s\n",
786 			xy->sc_dev.dv_xname, xyc_e2str(error));
787 		goto done;
788 	}
789 
790 	/* check dkbad for sanity */
791 	dkb = (struct dkbad *) buf;
792 	for (lcv = 0; lcv < 126; lcv++) {
793 		if ((dkb->bt_bad[lcv].bt_cyl == 0xffff ||
794 				dkb->bt_bad[lcv].bt_cyl == 0) &&
795 		     dkb->bt_bad[lcv].bt_trksec == 0xffff)
796 			continue;	/* blank */
797 		if (dkb->bt_bad[lcv].bt_cyl >= xy->ncyl)
798 			break;
799 		if ((dkb->bt_bad[lcv].bt_trksec >> 8) >= xy->nhead)
800 			break;
801 		if ((dkb->bt_bad[lcv].bt_trksec & 0xff) >= xy->nsect)
802 			break;
803 	}
804 	if (lcv != 126) {
805 		printf("%s: warning: invalid bad144 sector!\n",
806 			xy->sc_dev.dv_xname);
807 	} else {
808 		bcopy(buf, &xy->dkb, XYFM_BPS);
809 	}
810 
811 done:
812 	if (buf != NULL) {
813 		xy_dmamem_free(xyc->dmatag, xyc->auxmap,
814 				&seg, rseg, XYFM_BPS, buf);
815 	}
816 
817 	xy->state = newstate;
818 	if (!xa->booting) {
819 		wakeup(&xy->state);
820 		splx(s);
821 	}
822 }
823 
824 /*
825  * end of autoconfig functions
826  */
827 
828 /*
829  * { b , c } d e v s w   f u n c t i o n s
830  */
831 
832 /*
833  * xyclose: close device
834  */
835 int
836 xyclose(dev, flag, fmt, p)
837 	dev_t   dev;
838 	int     flag, fmt;
839 	struct proc *p;
840 
841 {
842 	struct xy_softc *xy = xy_cd.cd_devs[DISKUNIT(dev)];
843 	int     part = DISKPART(dev);
844 
845 	/* clear mask bits */
846 
847 	switch (fmt) {
848 	case S_IFCHR:
849 		xy->sc_dk.dk_copenmask &= ~(1 << part);
850 		break;
851 	case S_IFBLK:
852 		xy->sc_dk.dk_bopenmask &= ~(1 << part);
853 		break;
854 	}
855 	xy->sc_dk.dk_openmask = xy->sc_dk.dk_copenmask | xy->sc_dk.dk_bopenmask;
856 
857 	return 0;
858 }
859 
860 /*
861  * xydump: crash dump system
862  */
863 int
864 xydump(dev, blkno, va, size)
865 	dev_t dev;
866 	daddr_t blkno;
867 	caddr_t va;
868 	size_t size;
869 {
870 	int     unit, part;
871 	struct xy_softc *xy;
872 
873 	unit = DISKUNIT(dev);
874 	if (unit >= xy_cd.cd_ndevs)
875 		return ENXIO;
876 	part = DISKPART(dev);
877 
878 	xy = xy_cd.cd_devs[unit];
879 
880 	printf("%s%c: crash dump not supported (yet)\n", xy->sc_dev.dv_xname,
881 	    'a' + part);
882 
883 	return ENXIO;
884 
885 	/* outline: globals: "dumplo" == sector number of partition to start
886 	 * dump at (convert to physical sector with partition table)
887 	 * "dumpsize" == size of dump in clicks "physmem" == size of physical
888 	 * memory (clicks, ctob() to get bytes) (normal case: dumpsize ==
889 	 * physmem)
890 	 *
891 	 * dump a copy of physical memory to the dump device starting at sector
892 	 * "dumplo" in the swap partition (make sure > 0).   map in pages as
893 	 * we go.   use polled I/O.
894 	 *
895 	 * XXX how to handle NON_CONTIG? */
896 
897 }
898 
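/*
 * hypothetical sketch of the outline above (not implemented here; the loop
 * and names are made up): a polled dump would copy memory a chunk at a
 * time into a DVMA buffer (as xyattach does for the label sector) and push
 * it out with XY_SUB_POLL writes, e.g.
 *
 *	blkno = dumplo + offset of the dump partition;
 *	for (each chunk of physical memory to be dumped) {
 *		map the chunk and copy it into the DVMA buffer at dva;
 *		error = xyc_cmd(xy->parent, XYCMD_WR, 0, xy->xy_drive,
 *				blkno, chunksize / XYFM_BPS, dva, XY_SUB_POLL);
 *		XYC_DONE(xy->parent, error);
 *		if (error)
 *			return (EIO);
 *		blkno += chunksize / XYFM_BPS;
 *	}
 */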
899 /*
900  * xyioctl: ioctls on XY drives.   based on ioctl's of other netbsd disks.
901  */
902 int
903 xyioctl(dev, command, addr, flag, p)
904 	dev_t   dev;
905 	u_long  command;
906 	caddr_t addr;
907 	int     flag;
908 	struct proc *p;
909 
910 {
911 	struct xy_softc *xy;
912 	struct xd_iocmd *xio;
913 	int     error, s, unit;
914 #ifdef __HAVE_OLD_DISKLABEL
915 	struct disklabel newlabel;
916 #endif
917 	struct disklabel *lp;
918 
919 	unit = DISKUNIT(dev);
920 
921 	if (unit >= xy_cd.cd_ndevs || (xy = xy_cd.cd_devs[unit]) == NULL)
922 		return (ENXIO);
923 
924 	/* switch on ioctl type */
925 
926 	switch (command) {
927 	case DIOCSBAD:		/* set bad144 info */
928 		if ((flag & FWRITE) == 0)
929 			return EBADF;
930 		s = splbio();
931 		bcopy(addr, &xy->dkb, sizeof(xy->dkb));
932 		splx(s);
933 		return 0;
934 
935 	case DIOCGDINFO:	/* get disk label */
936 		bcopy(xy->sc_dk.dk_label, addr, sizeof(struct disklabel));
937 		return 0;
938 #ifdef __HAVE_OLD_DISKLABEL
939 	case ODIOCGDINFO:
940 		newlabel = *(xy->sc_dk.dk_label);
941 		if (newlabel.d_npartitions > OLDMAXPARTITIONS)
942 			return ENOTTY;
943 		memcpy(addr, &newlabel, sizeof (struct olddisklabel));
944 		return 0;
945 #endif
946 
947 	case DIOCGPART:	/* get partition info */
948 		((struct partinfo *) addr)->disklab = xy->sc_dk.dk_label;
949 		((struct partinfo *) addr)->part =
950 		    &xy->sc_dk.dk_label->d_partitions[DISKPART(dev)];
951 		return 0;
952 
953 	case DIOCSDINFO:	/* set disk label */
954 #ifdef __HAVE_OLD_DISKLABEL
955 	case ODIOCSDINFO:
956 		if (command == ODIOCSDINFO) {
957 			memset(&newlabel, 0, sizeof newlabel);
958 			memcpy(&newlabel, addr, sizeof (struct olddisklabel));
959 			lp = &newlabel;
960 		} else
961 #endif
962 		lp = (struct disklabel *)addr;
963 
964 		if ((flag & FWRITE) == 0)
965 			return EBADF;
966 		error = setdisklabel(xy->sc_dk.dk_label,
967 		    lp, /* xy->sc_dk.dk_openmask : */ 0,
968 		    xy->sc_dk.dk_cpulabel);
969 		if (error == 0) {
970 			if (xy->state == XY_DRIVE_NOLABEL)
971 				xy->state = XY_DRIVE_ONLINE;
972 		}
973 		return error;
974 
975 	case DIOCWLABEL:	/* change write status of disk label */
976 		if ((flag & FWRITE) == 0)
977 			return EBADF;
978 		if (*(int *) addr)
979 			xy->flags |= XY_WLABEL;
980 		else
981 			xy->flags &= ~XY_WLABEL;
982 		return 0;
983 
984 	case DIOCWDINFO:	/* write disk label */
985 #ifdef __HAVE_OLD_DISKLABEL
986 	case ODIOCWDINFO:
987 		if (command == ODIOCWDINFO) {
988 			memset(&newlabel, 0, sizeof newlabel);
989 			memcpy(&newlabel, addr, sizeof (struct olddisklabel));
990 			lp = &newlabel;
991 		} else
992 #endif
993 		lp = (struct disklabel *)addr;
994 
995 		if ((flag & FWRITE) == 0)
996 			return EBADF;
997 		error = setdisklabel(xy->sc_dk.dk_label,
998 		    lp, /* xy->sc_dk.dk_openmask : */ 0,
999 		    xy->sc_dk.dk_cpulabel);
1000 		if (error == 0) {
1001 			if (xy->state == XY_DRIVE_NOLABEL)
1002 				xy->state = XY_DRIVE_ONLINE;
1003 
1004 			/* Simulate opening partition 0 so write succeeds. */
1005 			xy->sc_dk.dk_openmask |= (1 << 0);
1006 			error = writedisklabel(MAKEDISKDEV(major(dev), DISKUNIT(dev), RAW_PART),
1007 			    xystrategy, xy->sc_dk.dk_label,
1008 			    xy->sc_dk.dk_cpulabel);
1009 			xy->sc_dk.dk_openmask =
1010 			    xy->sc_dk.dk_copenmask | xy->sc_dk.dk_bopenmask;
1011 		}
1012 		return error;
1013 
1014 	case DIOSXDCMD:
1015 		xio = (struct xd_iocmd *) addr;
1016 		if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
1017 			return (error);
1018 		return (xyc_ioctlcmd(xy, dev, xio));
1019 
1020 	default:
1021 		return ENOTTY;
1022 	}
1023 }
1024 
1025 /*
1026  * xyopen: open drive
1027  */
1028 
1029 int
1030 xyopen(dev, flag, fmt, p)
1031 	dev_t   dev;
1032 	int     flag, fmt;
1033 	struct proc *p;
1034 {
1035 	int     unit, part;
1036 	struct xy_softc *xy;
1037 	struct xyc_attach_args xa;
1038 
1039 	/* first, could it be a valid target? */
1040 
1041 	unit = DISKUNIT(dev);
1042 	if (unit >= xy_cd.cd_ndevs || (xy = xy_cd.cd_devs[unit]) == NULL)
1043 		return (ENXIO);
1044 	part = DISKPART(dev);
1045 
1046 	/* do we need to attach the drive? */
1047 
1048 	if (xy->state == XY_DRIVE_UNKNOWN) {
1049 		xa.driveno = xy->xy_drive;
1050 		xa.fullmode = XY_SUB_WAIT;
1051 		xa.booting = 0;
1052 		xyattach((struct device *) xy->parent,
1053 						(struct device *) xy, &xa);
1054 		if (xy->state == XY_DRIVE_UNKNOWN) {
1055 			return (EIO);
1056 		}
1057 	}
1058 	/* check for partition */
1059 
1060 	if (part != RAW_PART &&
1061 	    (part >= xy->sc_dk.dk_label->d_npartitions ||
1062 		xy->sc_dk.dk_label->d_partitions[part].p_fstype == FS_UNUSED)) {
1063 		return (ENXIO);
1064 	}
1065 	/* set open masks */
1066 
1067 	switch (fmt) {
1068 	case S_IFCHR:
1069 		xy->sc_dk.dk_copenmask |= (1 << part);
1070 		break;
1071 	case S_IFBLK:
1072 		xy->sc_dk.dk_bopenmask |= (1 << part);
1073 		break;
1074 	}
1075 	xy->sc_dk.dk_openmask = xy->sc_dk.dk_copenmask | xy->sc_dk.dk_bopenmask;
1076 
1077 	return 0;
1078 }
1079 
1080 int
1081 xyread(dev, uio, flags)
1082 	dev_t   dev;
1083 	struct uio *uio;
1084 	int flags;
1085 {
1086 
1087 	return (physio(xystrategy, NULL, dev, B_READ, minphys, uio));
1088 }
1089 
1090 int
1091 xywrite(dev, uio, flags)
1092 	dev_t   dev;
1093 	struct uio *uio;
1094 	int flags;
1095 {
1096 
1097 	return (physio(xystrategy, NULL, dev, B_WRITE, minphys, uio));
1098 }
1099 
1100 
1101 /*
1102  * xysize: return size of a partition for a dump
1103  */
1104 
1105 int
1106 xysize(dev)
1107 	dev_t   dev;
1108 
1109 {
1110 	struct xy_softc *xysc;
1111 	int     unit, part, size, omask;
1112 
1113 	/* valid unit? */
1114 	unit = DISKUNIT(dev);
1115 	if (unit >= xy_cd.cd_ndevs || (xysc = xy_cd.cd_devs[unit]) == NULL)
1116 		return (-1);
1117 
1118 	part = DISKPART(dev);
1119 	omask = xysc->sc_dk.dk_openmask & (1 << part);
1120 
1121 	if (omask == 0 && xyopen(dev, 0, S_IFBLK, NULL) != 0)
1122 		return (-1);
1123 
1124 	/* do it */
1125 	if (xysc->sc_dk.dk_label->d_partitions[part].p_fstype != FS_SWAP)
1126 		size = -1;	/* only give valid size for swap partitions */
1127 	else
1128 		size = xysc->sc_dk.dk_label->d_partitions[part].p_size *
1129 		    (xysc->sc_dk.dk_label->d_secsize / DEV_BSIZE);
1130 	if (omask == 0 && xyclose(dev, 0, S_IFBLK, NULL) != 0)
1131 		return (-1);
1132 	return (size);
1133 }
1134 
1135 /*
1136  * xystrategy: buffering system interface to xy.
1137  */
1138 
1139 void
1140 xystrategy(bp)
1141 	struct buf *bp;
1142 
1143 {
1144 	struct xy_softc *xy;
1145 	int     s, unit;
1146 	struct xyc_attach_args xa;
1147 	struct disklabel *lp;
1148 	daddr_t blkno;
1149 
1150 	unit = DISKUNIT(bp->b_dev);
1151 
1152 	/* check for live device */
1153 
1154 	if (unit >= xy_cd.cd_ndevs || (xy = xy_cd.cd_devs[unit]) == 0 ||
1155 	    bp->b_blkno < 0 ||
1156 	    (bp->b_bcount % xy->sc_dk.dk_label->d_secsize) != 0) {
1157 		bp->b_error = EINVAL;
1158 		goto bad;
1159 	}
1160 	/* do we need to attach the drive? */
1161 
1162 	if (xy->state == XY_DRIVE_UNKNOWN) {
1163 		xa.driveno = xy->xy_drive;
1164 		xa.fullmode = XY_SUB_WAIT;
1165 		xa.booting = 0;
1166 		xyattach((struct device *)xy->parent, (struct device *)xy, &xa);
1167 		if (xy->state == XY_DRIVE_UNKNOWN) {
1168 			bp->b_error = EIO;
1169 			goto bad;
1170 		}
1171 	}
1172 	if (xy->state != XY_DRIVE_ONLINE && DISKPART(bp->b_dev) != RAW_PART) {
1173 		/* no I/O to unlabeled disks, unless raw partition */
1174 		bp->b_error = EIO;
1175 		goto bad;
1176 	}
1177 	/* short circuit zero length request */
1178 
1179 	if (bp->b_bcount == 0)
1180 		goto done;
1181 
1182 	/* check bounds with label (disksubr.c).  Determine the size of the
1183 	 * transfer, and make sure it is within the boundaries of the
1184 	 * partition. Adjust transfer if needed, and signal errors or early
1185 	 * completion. */
1186 
1187 	lp = xy->sc_dk.dk_label;
1188 
1189 	if (bounds_check_with_label(bp, lp,
1190 		(xy->flags & XY_WLABEL) != 0) <= 0)
1191 		goto done;
1192 
1193 	/*
1194 	 * Now convert the block number to absolute and put it in
1195 	 * terms of the device's logical block size.
1196 	 */
1197 	blkno = bp->b_blkno / (lp->d_secsize / DEV_BSIZE);
1198 	if (DISKPART(bp->b_dev) != RAW_PART)
1199 		blkno += lp->d_partitions[DISKPART(bp->b_dev)].p_offset;
1200 
1201 	bp->b_rawblkno = blkno;
1202 
1203 	/*
1204 	 * now we know we have a valid buf structure that we need to do I/O
1205 	 * on.
1206 	 */
1207 	s = splbio();		/* protect the queues */
1208 
1209 	disksort_blkno(&xy->xyq, bp);
1210 
1211 	/* start 'em up */
1212 
1213 	xyc_start(xy->parent, NULL);
1214 
1215 	/* done! */
1216 
1217 	splx(s);
1218 	return;
1219 
1220 bad:				/* tells upper layers we have an error */
1221 	bp->b_flags |= B_ERROR;
1222 done:				/* tells upper layers we are done with this
1223 				 * buf */
1224 	bp->b_resid = bp->b_bcount;
1225 	biodone(bp);
1226 }
1227 /*
1228  * end of {b,c}devsw functions
1229  */
1230 
1231 /*
1232  * i n t e r r u p t   f u n c t i o n
1233  *
1234  * xycintr: hardware interrupt.
1235  */
1236 int
1237 xycintr(v)
1238 	void   *v;
1239 
1240 {
1241 	struct xyc_softc *xycsc = v;
1242 
1243 	/* kick the event counter */
1244 
1245 	xycsc->sc_intrcnt.ev_count++;
1246 
1247 	/* remove as many done IOPBs as possible */
1248 
1249 	xyc_remove_iorq(xycsc);
1250 
1251 	/* start any iorq's already waiting */
1252 
1253 	xyc_start(xycsc, NULL);
1254 
1255 	return (1);
1256 }
1257 /*
1258  * end of interrupt function
1259  */
1260 
1261 /*
1262  * i n t e r n a l   f u n c t i o n s
1263  */
1264 
1265 /*
1266  * xyc_rqinit: fill out the fields of an I/O request
1267  */
1268 
1269 inline void
1270 xyc_rqinit(rq, xyc, xy, md, blk, cnt, db, bp)
1271 	struct xy_iorq *rq;
1272 	struct xyc_softc *xyc;
1273 	struct xy_softc *xy;
1274 	int     md;
1275 	u_long  blk;
1276 	int     cnt;
1277 	caddr_t db;
1278 	struct buf *bp;
1279 {
1280 	rq->xyc = xyc;
1281 	rq->xy = xy;
1282 	rq->ttl = XYC_MAXTTL + 10;
1283 	rq->mode = md;
1284 	rq->tries = rq->errno = rq->lasterror = 0;
1285 	rq->blockno = blk;
1286 	rq->sectcnt = cnt;
1287 	rq->dbuf = db;
1288 	rq->buf = bp;
1289 }
1290 
1291 /*
1292  * xyc_rqtopb: load up an IOPB based on an iorq
1293  */
1294 
1295 void
1296 xyc_rqtopb(iorq, iopb, cmd, subfun)
1297 	struct xy_iorq *iorq;
1298 	struct xy_iopb *iopb;
1299 	int     cmd, subfun;
1300 
1301 {
1302 	u_long  block, dp;
1303 
1304 	/* normal IOPB case, standard stuff */
1305 
1306 	/* chain bit handled later */
1307 	iopb->ien = (XY_STATE(iorq->mode) == XY_SUB_POLL) ? 0 : 1;
1308 	iopb->com = cmd;
1309 	iopb->errno = 0;
1310 	iopb->errs = 0;
1311 	iopb->done = 0;
1312 	if (iorq->xy) {
1313 		iopb->unit = iorq->xy->xy_drive;
1314 		iopb->dt = iorq->xy->drive_type;
1315 	} else {
1316 		iopb->unit = 0;
1317 		iopb->dt = 0;
1318 	}
1319 	block = iorq->blockno;
1320 	if (iorq->xy == NULL || block == 0) {
1321 		iopb->sect = iopb->head = iopb->cyl = 0;
1322 	} else {
1323 		iopb->sect = block % iorq->xy->nsect;
1324 		block = block / iorq->xy->nsect;
1325 		iopb->head = block % iorq->xy->nhead;
1326 		block = block / iorq->xy->nhead;
1327 		iopb->cyl = block;
1328 	}
1329 	iopb->scnt = iorq->sectcnt;
1330 	dp = (u_long) iorq->dbuf;
1331 	if (iorq->dbuf == NULL) {
1332 		iopb->dataa = 0;
1333 		iopb->datar = 0;
1334 	} else {
1335 		iopb->dataa = (dp & 0xffff);
1336 		iopb->datar = ((dp & 0xff0000) >> 16);
1337 	}
1338 	iopb->subfn = subfun;
1339 }
1340 
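/*
 * illustrative arithmetic (not part of the original source): the block
 * number decomposes into sector/head/cylinder exactly as above.  with a
 * hypothetical geometry of nsect = 32 and nhead = 8, block 12345 gives
 *
 *	sect = 12345 % 32		= 25
 *	head = (12345 / 32) % 8		= 385 % 8 = 1
 *	cyl  = (12345 / 32) / 8		= 48
 */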
1341 
1342 /*
1343  * xyc_unbusy: wait for the xyc to go unbusy, or timeout.
1344  */
1345 
1346 int
1347 xyc_unbusy(xyc, del)
1348 
1349 struct xyc *xyc;
1350 int del;
1351 
1352 {
1353 	while (del-- > 0) {
1354 		if ((xyc->xyc_csr & XYC_GBSY) == 0)
1355 			break;
1356 		DELAY(1);
1357 	}
1358 	return(del == 0 ? XY_ERR_FAIL : XY_ERR_AOK);
1359 }
1360 
1361 /*
1362  * xyc_cmd: front end for POLL'd and WAIT'd commands.  Returns 0 or error.
1363  * note that NORM requests are handled separately.
1364  */
1365 int
1366 xyc_cmd(xycsc, cmd, subfn, unit, block, scnt, dptr, fullmode)
1367 	struct xyc_softc *xycsc;
1368 	int     cmd, subfn, unit, block, scnt;
1369 	char   *dptr;
1370 	int     fullmode;
1371 
1372 {
1373 	int     submode = XY_STATE(fullmode);
1374 	struct xy_iorq *iorq = xycsc->ciorq;
1375 	struct xy_iopb *iopb = xycsc->ciopb;
1376 
1377 	/*
1378 	 * is someone else using the control iorq?  wait for it if we can
1379 	 */
1380 start:
1381 	if (submode == XY_SUB_WAIT && XY_STATE(iorq->mode) != XY_SUB_FREE) {
1382 		if (tsleep(iorq, PRIBIO, "xyc_cmd", 0))
1383                                 return(XY_ERR_FAIL);
1384 		goto start;
1385 	}
1386 
1387 	if (XY_STATE(iorq->mode) != XY_SUB_FREE) {
1388 		DELAY(1000000);		/* XY_SUB_POLL: steal the iorq */
1389 		iorq->mode = XY_SUB_FREE;
1390 		printf("%s: stole control iopb\n", xycsc->sc_dev.dv_xname);
1391 	}
1392 
1393 	/* init iorq/iopb */
1394 
1395 	xyc_rqinit(iorq, xycsc,
1396 	    (unit == XYC_NOUNIT) ? NULL : xycsc->sc_drives[unit],
1397 	    fullmode, block, scnt, dptr, NULL);
1398 
1399 	/* load IOPB from iorq */
1400 
1401 	xyc_rqtopb(iorq, iopb, cmd, subfn);
1402 
1403 	/* submit it for processing */
1404 
1405 	xyc_submit_iorq(xycsc, iorq, fullmode);	/* error code will be in iorq */
1406 
1407 	return(XY_ERR_AOK);
1408 }
1409 
1410 /*
1411  * xyc_startbuf
1412  * start a buffer for running
1413  */
1414 
1415 int
1416 xyc_startbuf(xycsc, xysc, bp)
1417 	struct xyc_softc *xycsc;
1418 	struct xy_softc *xysc;
1419 	struct buf *bp;
1420 
1421 {
1422 	int     partno, error;
1423 	struct xy_iorq *iorq;
1424 	struct xy_iopb *iopb;
1425 	u_long  block;
1426 
1427 	iorq = xysc->xyrq;
1428 	iopb = iorq->iopb;
1429 
1430 	/* get buf */
1431 
1432 	if (bp == NULL)
1433 		panic("xyc_startbuf null buf");
1434 
1435 	partno = DISKPART(bp->b_dev);
1436 #ifdef XYC_DEBUG
1437 	printf("xyc_startbuf: %s%c: %s block %d\n", xysc->sc_dev.dv_xname,
1438 	    'a' + partno, (bp->b_flags & B_READ) ? "read" : "write", bp->b_blkno);
1439 	printf("xyc_startbuf: b_bcount %d, b_data 0x%x\n",
1440 	    bp->b_bcount, bp->b_data);
1441 #endif
1442 
1443 	/*
1444 	 * load request.
1445 	 *
1446 	 * note that iorq points to the buffer as mapped into DVMA space,
1447 	 * whereas the bp->b_data points to its non-DVMA mapping.
1448 	 */
1449 
1450 	block = bp->b_rawblkno;
1451 
1452 	error = bus_dmamap_load(xycsc->dmatag, iorq->dmamap,
1453 			bp->b_data, bp->b_bcount, 0, BUS_DMA_NOWAIT);
1454 	if (error != 0) {
1455 		printf("%s: warning: cannot load DMA map\n",
1456 			xycsc->sc_dev.dv_xname);
1457 		return (XY_ERR_FAIL);	/* XXX: need some sort of
1458 					 * call-back scheme here? */
1459 	}
1460 
1461 	bus_dmamap_sync(xycsc->dmatag, iorq->dmamap, 0,
1462 			iorq->dmamap->dm_mapsize, (bp->b_flags & B_READ)
1463 				? BUS_DMASYNC_PREREAD
1464 				: BUS_DMASYNC_PREWRITE);
1465 
1466 	/* init iorq and load iopb from it */
1467 	xyc_rqinit(iorq, xycsc, xysc, XY_SUB_NORM | XY_MODE_VERBO, block,
1468 		   bp->b_bcount / XYFM_BPS,
1469 		   (caddr_t)(u_long)iorq->dmamap->dm_segs[0].ds_addr,
1470 		   bp);
1471 
1472 	xyc_rqtopb(iorq, iopb, (bp->b_flags & B_READ) ? XYCMD_RD : XYCMD_WR, 0);
1473 
1474 	/* Instrumentation. */
1475 	disk_busy(&xysc->sc_dk);
1476 
1477 	return (XY_ERR_AOK);
1478 }
1479 
1480 
1481 /*
1482  * xyc_submit_iorq: submit an iorq for processing.  returns XY_ERR_AOK
1483  * if ok.  if it fails it returns an error code.  type is XY_SUB_*.
1484  *
1485  * note: caller frees iorq in all cases except NORM
1486  *
1487  * return value:
1488  *   NORM: XY_AOK (req pending), XY_FAIL (couldn't submit request)
1489  *   WAIT: XY_AOK (success), <error-code> (failed)
1490  *   POLL: <same as WAIT>
1491  *   NOQ : <same as NORM>
1492  *
1493  * there are three sources for i/o requests:
1494  * [1] xystrategy: normal block I/O, using "struct buf" system.
1495  * [2] autoconfig/crash dump: these are polled I/O requests, no interrupts.
1496  * [3] open/ioctl: these are I/O requests done in the context of a process,
1497  *                 and the process should block until they are done.
1498  *
1499  * software state is stored in the iorq structure.  each iorq has an
1500  * iopb structure.  the hardware understands the iopb structure.
1501  * every command must go through an iopb.  a 450 handles one iopb at a
1502  * time, whereas a 451 can take them in chains.  [the 450 claims it
1503  * can handle chains, but it appears to be buggy...]   iopbs are allocated
1504  * in DVMA space at boot up time.  each disk gets one iopb, and the
1505  * controller gets one (for POLL and WAIT commands).  what happens if
1506  * the iopb is busy?  for i/o type [1], the buffers are queued at the
1507  * "buff" layer and picked up later by the interrupt routine.  for case
1508  * [2] we can only be blocked if there is a WAIT type I/O request being
1509  * run.   since this can only happen when we are crashing, we wait a sec
1510  * and then steal the IOPB.  for case [3] the process can sleep
1511  * on the iorq free list until some iopbs are available.
1512  */
1513 
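/*
 * illustrative mapping (not part of the original source) of the three
 * request sources above onto submit types, as used elsewhere in this file:
 *
 *	[1] xystrategy -> xyc_startbuf		XY_SUB_NORM (interrupt driven)
 *	[2] autoconfig (xycattach/xyattach)	XY_SUB_POLL (polled)
 *	[3] xyopen re-attach, ioctl commands	XY_SUB_WAIT (tsleep until done)
 */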
1514 
1515 int
1516 xyc_submit_iorq(xycsc, iorq, type)
1517 	struct xyc_softc *xycsc;
1518 	struct xy_iorq *iorq;
1519 	int     type;
1520 
1521 {
1522 	struct xy_iopb *dmaiopb;
1523 
1524 #ifdef XYC_DEBUG
1525 	printf("xyc_submit_iorq(%s, addr=0x%x, type=%d)\n",
1526 		xycsc->sc_dev.dv_xname, iorq, type);
1527 #endif
1528 
1529 	/* first check and see if controller is busy */
1530 	if ((xycsc->xyc->xyc_csr & XYC_GBSY) != 0) {
1531 #ifdef XYC_DEBUG
1532 		printf("xyc_submit_iorq: XYC not ready (BUSY)\n");
1533 #endif
1534 		if (type == XY_SUB_NOQ)
1535 			return (XY_ERR_FAIL);	/* failed */
1536 		switch (type) {
1537 		case XY_SUB_NORM:
1538 			return XY_ERR_AOK;	/* success */
1539 		case XY_SUB_WAIT:
1540 			while (iorq->iopb->done == 0) {
1541 				(void) tsleep(iorq, PRIBIO, "xyciorq", 0);
1542 			}
1543 			return (iorq->errno);
1544 		case XY_SUB_POLL:		/* steal controller */
1545 			(void)xycsc->xyc->xyc_rsetup; /* RESET */
1546 			if (xyc_unbusy(xycsc->xyc,XYC_RESETUSEC) == XY_ERR_FAIL)
1547 				panic("xyc_submit_iorq: stuck xyc");
1548 			printf("%s: stole controller\n",
1549 				xycsc->sc_dev.dv_xname);
1550 			break;
1551 		default:
1552 			panic("xyc_submit_iorq adding");
1553 		}
1554 	}
1555 
1556 	dmaiopb = xyc_chain(xycsc, iorq);	 /* build chain */
1557 	if (dmaiopb == NULL) { /* nothing doing? */
1558 		if (type == XY_SUB_NORM || type == XY_SUB_NOQ)
1559 			return(XY_ERR_AOK);
1560 		panic("xyc_submit_iorq: xyc_chain failed!\n");
1561 	}
1562 
1563 	XYC_GO(xycsc->xyc, (u_long)dmaiopb);
1564 
1565 	/* command now running, wrap it up */
1566 	switch (type) {
1567 	case XY_SUB_NORM:
1568 	case XY_SUB_NOQ:
1569 		return (XY_ERR_AOK);	/* success */
1570 	case XY_SUB_WAIT:
1571 		while (iorq->iopb->done == 0) {
1572 			(void) tsleep(iorq, PRIBIO, "xyciorq", 0);
1573 		}
1574 		return (iorq->errno);
1575 	case XY_SUB_POLL:
1576 		return (xyc_piodriver(xycsc, iorq));
1577 	default:
1578 		panic("xyc_submit_iorq wrap up");
1579 	}
1580 	panic("xyc_submit_iorq");
1581 	return 0;	/* not reached */
1582 }
1583 
1584 
1585 /*
1586  * xyc_chain: build a chain.  return dvma address of first element in
1587  * the chain.   iorq != NULL: means we only want that item on the chain.
1588  */
1589 
1590 struct xy_iopb *
1591 xyc_chain(xycsc, iorq)
1592 	struct xyc_softc *xycsc;
1593 	struct xy_iorq *iorq;
1594 
1595 {
1596 	int togo, chain, hand;
1597 
1598 	bzero(xycsc->xy_chain, sizeof(xycsc->xy_chain));
1599 
1600 	/*
1601 	 * promote control IOPB to the top
1602 	 */
1603 	if (iorq == NULL) {
1604 		if ((XY_STATE(xycsc->reqs[XYC_CTLIOPB].mode) == XY_SUB_POLL ||
1605 		     XY_STATE(xycsc->reqs[XYC_CTLIOPB].mode) == XY_SUB_WAIT) &&
1606 		     xycsc->iopbase[XYC_CTLIOPB].done == 0)
1607 			iorq = &xycsc->reqs[XYC_CTLIOPB];
1608 	}
1609 
1610 	/*
1611 	 * special case: if iorq != NULL then we have a POLL or WAIT request.
1612 	 * we let these take priority and do them first.
1613 	 */
1614 	if (iorq) {
1615 		xycsc->xy_chain[0] = iorq;
1616 		iorq->iopb->chen = 0;
1617 		return(iorq->dmaiopb);
1618 	}
1619 
1620 	/*
1621 	 * NORM case: do round robin and maybe chain (if allowed and possible)
1622 	 */
1623 	chain = 0;
1624 	hand = xycsc->xy_hand;
1625 	xycsc->xy_hand = (xycsc->xy_hand + 1) % XYC_MAXIOPB;
1626 
1627 	for (togo = XYC_MAXIOPB; togo > 0;
1628 	     togo--, hand = (hand + 1) % XYC_MAXIOPB) {
1629 		struct xy_iopb *iopb, *prev_iopb, *dmaiopb;
1630 
1631 		if (XY_STATE(xycsc->reqs[hand].mode) != XY_SUB_NORM ||
1632 		    xycsc->iopbase[hand].done)
1633 			continue;   /* not ready-for-i/o */
1634 
1635 		xycsc->xy_chain[chain] = &xycsc->reqs[hand];
1636 		iopb = xycsc->xy_chain[chain]->iopb;
1637 		iopb->chen = 0;
1638 		if (chain != 0) {
1639 			/* adding a link to a chain */
1640 			prev_iopb = xycsc->xy_chain[chain-1]->iopb;
1641 			prev_iopb->chen = 1;
1642 			dmaiopb = xycsc->xy_chain[chain]->dmaiopb;
1643 			prev_iopb->nxtiopb = ((u_long)dmaiopb) & 0xffff;
1644 		} else {
1645 			/* head of chain */
1646 			iorq = xycsc->xy_chain[chain];
1647 		}
1648 		chain++;
1649 
1650 		/* quit if chaining dis-allowed */
1651 		if (xycsc->no_ols)
1652 			break;
1653 	}
1654 
1655 	return(iorq ? iorq->dmaiopb : NULL);
1656 }
1657 
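/*
 * illustrative picture (not part of the original source): a two-element
 * NORM chain built above ends up as
 *
 *	reqs[a].iopb:  chen = 1, nxtiopb = low 16 bits of reqs[b].dmaiopb
 *	reqs[b].iopb:  chen = 0  (end of chain)
 *
 * and reqs[a]'s DVMA iopb address is what gets handed to XYC_GO.  the 64K
 * boundary on the iopb DMA map (see xycattach) is what lets a 16-bit
 * nxtiopb reach every other iopb.
 */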
1658 /*
1659  * xyc_piodriver
1660  *
1661  * programmed i/o driver.   this function takes over the computer
1662  * and drains off the polled i/o request.   it returns the status of the iorq
1663  * the caller is interested in.
1664  */
1665 int
1666 xyc_piodriver(xycsc, iorq)
1667 	struct xyc_softc *xycsc;
1668 	struct xy_iorq  *iorq;
1669 
1670 {
1671 	int     nreset = 0;
1672 	int     retval = 0;
1673 	u_long  res;
1674 #ifdef XYC_DEBUG
1675 	printf("xyc_piodriver(%s, 0x%x)\n", xycsc->sc_dev.dv_xname, iorq);
1676 #endif
1677 
1678 	while (iorq->iopb->done == 0) {
1679 
1680 		res = xyc_unbusy(xycsc->xyc, XYC_MAXTIME);
1681 
1682 		/* we expect some progress soon */
1683 		if (res == XY_ERR_FAIL && nreset >= 2) {
1684 			xyc_reset(xycsc, 0, XY_RSET_ALL, XY_ERR_FAIL, 0);
1685 #ifdef XYC_DEBUG
1686 			printf("xyc_piodriver: timeout\n");
1687 #endif
1688 			return (XY_ERR_FAIL);
1689 		}
1690 		if (res == XY_ERR_FAIL) {
1691 			if (xyc_reset(xycsc, 0,
1692 				      (nreset++ == 0) ? XY_RSET_NONE : iorq,
1693 				      XY_ERR_FAIL,
1694 				      0) == XY_ERR_FAIL)
1695 				return (XY_ERR_FAIL);	/* flushes all but POLL
1696 							 * requests, resets */
1697 			continue;
1698 		}
1699 
1700 		xyc_remove_iorq(xycsc);	 /* may resubmit request */
1701 
1702 		if (iorq->iopb->done == 0)
1703 			xyc_start(xycsc, iorq);
1704 	}
1705 
1706 	/* get return value */
1707 
1708 	retval = iorq->errno;
1709 
1710 #ifdef XYC_DEBUG
1711 	printf("xyc_piodriver: done, retval = 0x%x (%s)\n",
1712 	    iorq->errno, xyc_e2str(iorq->errno));
1713 #endif
1714 
1715 	/* start up any bufs that have queued */
1716 
1717 	xyc_start(xycsc, NULL);
1718 
1719 	return (retval);
1720 }
1721 
1722 /*
1723  * xyc_xyreset: reset one drive.   NOTE: assumes xyc was just reset.
1724  * we steal iopb[XYC_CTLIOPB] for this, but we put it back when we are done.
1725  */
1726 void
1727 xyc_xyreset(xycsc, xysc)
1728 	struct xyc_softc *xycsc;
1729 	struct xy_softc *xysc;
1730 
1731 {
1732 	struct xy_iopb tmpiopb;
1733 	struct xy_iopb *iopb;
1734 	int     del;
1735 
1736 	iopb = xycsc->ciopb;
1737 
1738 	/* Save contents */
1739 	bcopy(iopb, &tmpiopb, sizeof(struct xy_iopb));
1740 
1741 	iopb->chen = iopb->done = iopb->errs = 0;
1742 	iopb->ien = 0;
1743 	iopb->com = XYCMD_RST;
1744 	iopb->unit = xysc->xy_drive;
1745 
1746 	XYC_GO(xycsc->xyc, (u_long)xycsc->ciorq->dmaiopb);
1747 
1748 	del = XYC_RESETUSEC;
1749 	while (del > 0) {
1750 		if ((xycsc->xyc->xyc_csr & XYC_GBSY) == 0)
1751 			break;
1752 		DELAY(1);
1753 		del--;
1754 	}
1755 
1756 	if (del <= 0 || iopb->errs) {
1757 		printf("%s: off-line: %s\n", xycsc->sc_dev.dv_xname,
1758 		    xyc_e2str(iopb->errno));
1759 		del = xycsc->xyc->xyc_rsetup;
1760 		if (xyc_unbusy(xycsc->xyc, XYC_RESETUSEC) == XY_ERR_FAIL)
1761 			panic("xyc_reset");
1762 	} else {
1763 		xycsc->xyc->xyc_csr = XYC_IPND;	/* clear IPND */
1764 	}
1765 
1766 	/* Restore contents */
1767 	bcopy(&tmpiopb, iopb, sizeof(struct xy_iopb));
1768 }
1769 
1770 
1771 /*
1772  * xyc_reset: reset everything: requests are marked as errors except
1773  * a polled request (which is resubmitted)
1774  */
1775 int
1776 xyc_reset(xycsc, quiet, blastmode, error, xysc)
1777 	struct xyc_softc *xycsc;
1778 	int     quiet, error;
1779 	struct xy_iorq *blastmode;
1780 	struct xy_softc *xysc;
1781 
1782 {
1783 	int     del = 0, lcv, retval = XY_ERR_AOK;
1784 
1785 	/* soft reset hardware */
1786 
1787 	if (!quiet)
1788 		printf("%s: soft reset\n", xycsc->sc_dev.dv_xname);
1789 	del = xycsc->xyc->xyc_rsetup;
1790 	del = xyc_unbusy(xycsc->xyc, XYC_RESETUSEC);
1791 	if (del == XY_ERR_FAIL) {
1792 		blastmode = XY_RSET_ALL;	/* dead, flush all requests */
1793 		retval = XY_ERR_FAIL;
1794 	}
1795 	if (xysc)
1796 		xyc_xyreset(xycsc, xysc);
1797 
1798 	/* fix queues based on "blast-mode" */
1799 
1800 	for (lcv = 0; lcv < XYC_MAXIOPB; lcv++) {
1801 		register struct xy_iorq *iorq = &xycsc->reqs[lcv];
1802 
1803 		if (XY_STATE(iorq->mode) != XY_SUB_POLL &&
1804 		    XY_STATE(iorq->mode) != XY_SUB_WAIT &&
1805 		    XY_STATE(iorq->mode) != XY_SUB_NORM)
1806 			/* is it active? */
1807 			continue;
1808 
1809 		if (blastmode == XY_RSET_ALL ||
1810 				blastmode != iorq) {
1811 			/* failed */
1812 			iorq->errno = error;
1813 			xycsc->iopbase[lcv].done = xycsc->iopbase[lcv].errs = 1;
1814 			switch (XY_STATE(iorq->mode)) {
1815 			case XY_SUB_NORM:
1816 			    iorq->buf->b_error = EIO;
1817 			    iorq->buf->b_flags |= B_ERROR;
1818 			    iorq->buf->b_resid = iorq->sectcnt * XYFM_BPS;
1819 
1820 			    bus_dmamap_sync(xycsc->dmatag, iorq->dmamap, 0,
1821 					iorq->dmamap->dm_mapsize,
1822 					(iorq->buf->b_flags & B_READ)
1823 						? BUS_DMASYNC_POSTREAD
1824 						: BUS_DMASYNC_POSTWRITE);
1825 
1826 			    bus_dmamap_unload(xycsc->dmatag, iorq->dmamap);
1827 
1828 			    BUFQ_REMOVE(&iorq->xy->xyq, iorq->buf);
1829 			    disk_unbusy(&xycsc->reqs[lcv].xy->sc_dk,
1830 				(xycsc->reqs[lcv].buf->b_bcount -
1831 				xycsc->reqs[lcv].buf->b_resid));
1832 			    biodone(iorq->buf);
1833 			    iorq->mode = XY_SUB_FREE;
1834 			    break;
1835 			case XY_SUB_WAIT:
1836 			    wakeup(iorq);
1837 			case XY_SUB_POLL:
1838 			    iorq->mode =
1839 				XY_NEWSTATE(iorq->mode, XY_SUB_DONE);
1840 			    break;
1841 			}
1842 
1843 		} else {
1844 
1845 			/* resubmit, no need to do anything here */
1846 		}
1847 	}
1848 
1849 	/*
1850 	 * now, if stuff is waiting, start it.
1851 	 * since we just reset it should go
1852 	 */
1853 	xyc_start(xycsc, NULL);
1854 
1855 	return (retval);
1856 }
1857 
1858 /*
1859  * xyc_start: start waiting buffers
1860  */
1861 
1862 void
1863 xyc_start(xycsc, iorq)
1864 	struct xyc_softc *xycsc;
1865 	struct xy_iorq *iorq;
1866 
1867 {
1868 	int lcv;
1869 	struct xy_softc *xy;
1870 
1871 	if (iorq == NULL) {
1872 		for (lcv = 0; lcv < XYC_MAXDEV ; lcv++) {
1873 			if ((xy = xycsc->sc_drives[lcv]) == NULL) continue;
1874 			if (BUFQ_FIRST(&xy->xyq) == NULL) continue;
1875 			if (xy->xyrq->mode != XY_SUB_FREE) continue;
1876 			xyc_startbuf(xycsc, xy, BUFQ_FIRST(&xy->xyq));
1877 		}
1878 	}
1879 	xyc_submit_iorq(xycsc, iorq, XY_SUB_NOQ);
1880 }
1881 
1882 /*
1883  * xyc_remove_iorq: remove "done" IOPB's.
1884  */
1885 
1886 int
1887 xyc_remove_iorq(xycsc)
1888 	struct xyc_softc *xycsc;
1889 
1890 {
1891 	int     errno, rq, comm, errs;
1892 	struct xyc *xyc = xycsc->xyc;
1893 	u_long  addr;
1894 	struct xy_iopb *iopb;
1895 	struct xy_iorq *iorq;
1896 	struct buf *bp;
1897 
1898 	if (xyc->xyc_csr & XYC_DERR) {
1899 		/*
1900 		 * DOUBLE ERROR: should never happen under normal use. This
1901 		 * error is so bad, you can't even tell which IOPB is bad, so
1902 		 * we dump them all.
1903 		 */
1904 		errno = XY_ERR_DERR;
1905 		printf("%s: DOUBLE ERROR!\n", xycsc->sc_dev.dv_xname);
1906 		if (xyc_reset(xycsc, 0, XY_RSET_ALL, errno, 0) != XY_ERR_AOK) {
1907 			printf("%s: soft reset failed!\n",
1908 				xycsc->sc_dev.dv_xname);
1909 			panic("xyc_remove_iorq: controller DEAD");
1910 		}
1911 		return (XY_ERR_AOK);
1912 	}
1913 
1914 	/*
1915 	 * get iopb that is done, loop down the chain
1916 	 */
1917 
1918 	if (xyc->xyc_csr & XYC_ERR) {
1919 		xyc->xyc_csr = XYC_ERR; /* clear error condition */
1920 	}
1921 	if (xyc->xyc_csr & XYC_IPND) {
1922 		xyc->xyc_csr = XYC_IPND; /* clear interrupt */
1923 	}
1924 
1925 	for (rq = 0; rq < XYC_MAXIOPB; rq++) {
1926 		iorq = xycsc->xy_chain[rq];
1927 		if (iorq == NULL) break; /* done ! */
1928 		if (iorq->mode == 0 || XY_STATE(iorq->mode) == XY_SUB_DONE)
1929 			continue;	/* free, or done */
1930 		iopb = iorq->iopb;
1931 		if (iopb->done == 0)
1932 			continue;	/* not done yet */
1933 
1934 		comm = iopb->com;
1935 		errs = iopb->errs;
1936 
1937 		if (errs)
1938 			iorq->errno = iopb->errno;
1939 		else
1940 			iorq->errno = 0;
1941 
1942 		/* handle non-fatal errors */
1943 
1944 		if (errs &&
1945 		    xyc_error(xycsc, iorq, iopb, comm) == XY_ERR_AOK)
1946 			continue;	/* AOK: we resubmitted it */
1947 
1948 
1949 		/* this iorq is now done (hasn't been restarted or anything) */
1950 
1951 		if ((iorq->mode & XY_MODE_VERBO) && iorq->lasterror)
1952 			xyc_perror(iorq, iopb, 0);
1953 
1954 		/* now, if this was a read/write, check to make sure we got all the data
1955 		 * we needed. (this may not be the case if we got an error in
1956 		 * the middle of a multisector request).   */
1957 
1958 		if ((iorq->mode & XY_MODE_B144) != 0 && errs == 0 &&
1959 		    (comm == XYCMD_RD || comm == XYCMD_WR)) {
1960 			/* we just successfully processed a bad144 sector.
1961 			 * note: if we are in bad144 mode, the pointers have
1962 			 * been advanced already (see xyc_error) and are pointing
1963 			 * at the bad144 sector.   to exit bad144 mode, we
1964 			 * must advance the pointers 1 sector and issue a new
1965 			 * request if there are still sectors left to process.
1966 			 *
1967 			 */
1968 			XYC_ADVANCE(iorq, 1);	/* advance 1 sector */
1969 
1970 			/* exit b144 mode */
1971 			iorq->mode = iorq->mode & (~XY_MODE_B144);
1972 
1973 			if (iorq->sectcnt) {	/* more to go! */
1974 				iorq->lasterror = iorq->errno = iopb->errno = 0;
1975 				iopb->errs = iopb->done = 0;
1976 				iorq->tries = 0;
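				/*
				 * recompute cyl/head/sect from the absolute
				 * block number.  for example, with a
				 * hypothetical 8-head, 32-sector/track drive
				 * (sectpercyl = 256), blockno 1000 restarts
				 * at cyl 3, head 7, sect 8.
				 */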
1977 				iopb->scnt = iorq->sectcnt;
1978 				iopb->cyl = iorq->blockno /
1979 						iorq->xy->sectpercyl;
1980 				iopb->head =
1981 					(iorq->blockno / iorq->xy->nsect) %
1982 						iorq->xy->nhead;
1983 				iopb->sect = iorq->blockno % iorq->xy->nsect;
1984 				addr = (u_long) iorq->dbuf;
1985 				iopb->dataa = (addr & 0xffff);
1986 				iopb->datar = ((addr & 0xff0000) >> 16);
1987 				/* will resubmit at end */
1988 				continue;
1989 			}
1990 		}
1991 		/* final cleanup, totally done with this request */
1992 
1993 		switch (XY_STATE(iorq->mode)) {
1994 		case XY_SUB_NORM:
1995 			bp = iorq->buf;
1996 			if (errs) {
1997 				bp->b_error = EIO;
1998 				bp->b_flags |= B_ERROR;
1999 				bp->b_resid = iorq->sectcnt * XYFM_BPS;
2000 			} else {
2001 				bp->b_resid = 0;	/* done */
2002 			}
2003 			bus_dmamap_sync(xycsc->dmatag, iorq->dmamap, 0,
2004 					iorq->dmamap->dm_mapsize,
2005 					(iorq->buf->b_flags & B_READ)
2006 						? BUS_DMASYNC_POSTREAD
2007 						: BUS_DMASYNC_POSTWRITE);
2008 
2009 			bus_dmamap_unload(xycsc->dmatag, iorq->dmamap);
2010 
2011 			BUFQ_REMOVE(&iorq->xy->xyq, bp);
2012 			disk_unbusy(&iorq->xy->sc_dk,
2013 			    (bp->b_bcount - bp->b_resid));
2014 			iorq->mode = XY_SUB_FREE;
2015 			biodone(bp);
2016 			break;
2017 		case XY_SUB_WAIT:
2018 			iorq->mode = XY_NEWSTATE(iorq->mode, XY_SUB_DONE);
2019 			wakeup(iorq);
2020 			break;
2021 		case XY_SUB_POLL:
2022 			iorq->mode = XY_NEWSTATE(iorq->mode, XY_SUB_DONE);
2023 			break;
2024 		}
2025 	}
2026 
2027 	return (XY_ERR_AOK);
2028 }
2029 
2030 /*
2031  * xyc_perror: print error.
2032  * - if still_trying is true: we got an error, retried and got a
2033  *   different error.  in that case lasterror is the old error,
2034  *   and errno is the new one.
2035  * - if still_trying is not true, then if we ever had an error it
2036  *   is in lasterror. also, if iorq->errno == 0, then we recovered
2037  *   from that error (otherwise iorq->errno == iorq->lasterror).
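 *
 * example output (hypothetical device, location, and error values):
 *
 *   xy0a: read 210/3/14: Hard ECC error [still trying, new error=Seek error]
 *   xy0a: write 35/1/2: Seek error [recovered in 2 tries]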
2038  */
2039 void
2040 xyc_perror(iorq, iopb, still_trying)
2041 	struct xy_iorq *iorq;
2042 	struct xy_iopb *iopb;
2043 	int     still_trying;
2044 
2045 {
2046 
2047 	int     error = iorq->lasterror;
2048 
2049 	printf("%s", (iorq->xy) ? iorq->xy->sc_dev.dv_xname
2050 	    : iorq->xyc->sc_dev.dv_xname);
2051 	if (iorq->buf)
2052 		printf("%c: ", 'a' + DISKPART(iorq->buf->b_dev));
2053 	if (iopb->com == XYCMD_RD || iopb->com == XYCMD_WR)
2054 		printf("%s %d/%d/%d: ",
2055 			(iopb->com == XYCMD_RD) ? "read" : "write",
2056 			iopb->cyl, iopb->head, iopb->sect);
2057 	printf("%s", xyc_e2str(error));
2058 
2059 	if (still_trying)
2060 		printf(" [still trying, new error=%s]", xyc_e2str(iorq->errno));
2061 	else
2062 		if (iorq->errno == 0)
2063 			printf(" [recovered in %d tries]", iorq->tries);
2064 
2065 	printf("\n");
2066 }
2067 
2068 /*
2069  * xyc_error: non-fatal error encountered... recover.
2070  * return AOK if resubmitted, return FAIL if this iopb is done
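 *
 * recovery, in order: reset-class errors reset the controller first;
 * a failed read/write whose sector is listed in the bad144 table is
 * redirected to its replacement sector; anything else is retried up to
 * XYC_MAXTRIES times before the request is failed.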
2071  */
2072 int
2073 xyc_error(xycsc, iorq, iopb, comm)
2074 	struct xyc_softc *xycsc;
2075 	struct xy_iorq *iorq;
2076 	struct xy_iopb *iopb;
2077 	int     comm;
2078 
2079 {
2080 	int     errno = iorq->errno;
2081 	int     erract = xyc_entoact(errno);
2082 	int     oldmode, advance;
2083 #ifdef __sparc__
2084 	int i;
2085 #endif
2086 
2087 	if (erract == XY_ERA_RSET) {	/* some errors require a reset */
2088 		oldmode = iorq->mode;
2089 		iorq->mode = XY_SUB_DONE | (~XY_SUB_MASK & oldmode);
2090 		/* make xyc_start ignore us */
2091 		xyc_reset(xycsc, 1, XY_RSET_NONE, errno, iorq->xy);
2092 		iorq->mode = oldmode;
2093 	}
2094 	/* check for a read/write to a sector in the bad144 table; if bad,
2095 	 * redirect the request to the bad144 area */
2096 
2097 	if ((comm == XYCMD_RD || comm == XYCMD_WR) &&
2098 	    (iorq->mode & XY_MODE_B144) == 0) {
2099 		advance = iorq->sectcnt - iopb->scnt;
2100 		XYC_ADVANCE(iorq, advance);
2101 #ifdef __sparc__
2102 		if ((i = isbad(&iorq->xy->dkb, iorq->blockno / iorq->xy->sectpercyl,
2103 			    (iorq->blockno / iorq->xy->nsect) % iorq->xy->nhead,
2104 			    iorq->blockno % iorq->xy->nsect)) != -1) {
2105 			iorq->mode |= XY_MODE_B144;	/* enter bad144 mode &
2106 							 * redirect */
2107 			iopb->errno = iopb->done = iopb->errs = 0;
2108 			iopb->scnt = 1;
2109 			iopb->cyl = (iorq->xy->ncyl + iorq->xy->acyl) - 2;
2110 			/* second to last acyl */
2111 			i = iorq->xy->sectpercyl - 1 - i;	/* follow bad144
2112 								 * standard */
2113 			iopb->head = i / iorq->xy->nsect;
2114 			iopb->sect = i % iorq->xy->nsect;
2115 			/* will resubmit when we come out of remove_iorq */
2116 			return (XY_ERR_AOK);	/* recovered! */
2117 		}
2118 #endif
2119 	}
2120 
2121 	/*
2122 	 * it isn't a bad144 sector, so it must be a real error!  see if we
2123 	 * can retry it.
2124 	 */
2125 	if ((iorq->mode & XY_MODE_VERBO) && iorq->lasterror)
2126 		xyc_perror(iorq, iopb, 1);	/* inform of error state
2127 						 * change */
2128 	iorq->lasterror = errno;
2129 
2130 	if ((erract == XY_ERA_RSET || erract == XY_ERA_HARD)
2131 	    && iorq->tries < XYC_MAXTRIES) {	/* retry? */
2132 		iorq->tries++;
2133 		iorq->errno = iopb->errno = iopb->done = iopb->errs = 0;
2134 		/* will resubmit at end of remove_iorq */
2135 		return (XY_ERR_AOK);	/* recovered! */
2136 	}
2137 
2138 	/* failed to recover from this error */
2139 	return (XY_ERR_FAIL);
2140 }
2141 
2142 /*
2143  * xyc_tick: make sure xy is still alive and ticking (err, kicking).
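 *
 * each live request's ttl is decremented on every tick; if any reaches
 * zero the controller is assumed to be wedged and is reset.  the timer
 * rearms itself every XYC_TICKCNT ticks via callout_reset().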
2144  */
2145 void
2146 xyc_tick(arg)
2147 	void   *arg;
2148 
2149 {
2150 	struct xyc_softc *xycsc = arg;
2151 	int     lcv, s, reset = 0;
2152 
2153 	/* reduce ttl for each request; if one goes to zero, reset the xyc */
2154 	s = splbio();
2155 	for (lcv = 0; lcv < XYC_MAXIOPB; lcv++) {
2156 		if (xycsc->reqs[lcv].mode == 0 ||
2157 		    XY_STATE(xycsc->reqs[lcv].mode) == XY_SUB_DONE)
2158 			continue;
2159 		xycsc->reqs[lcv].ttl--;
2160 		if (xycsc->reqs[lcv].ttl == 0)
2161 			reset = 1;
2162 	}
2163 	if (reset) {
2164 		printf("%s: watchdog timeout\n", xycsc->sc_dev.dv_xname);
2165 		xyc_reset(xycsc, 0, XY_RSET_NONE, XY_ERR_FAIL, NULL);
2166 	}
2167 	splx(s);
2168 
2169 	/* until next time */
2170 
2171 	callout_reset(&xycsc->sc_tick_ch, XYC_TICKCNT, xyc_tick, xycsc);
2172 }
2173 
2174 /*
2175  * xyc_ioctlcmd: this function provides a user level interface to the
2176  * controller via ioctl.   this allows "format" programs to be written
2177  * in user code, and is also useful for some debugging.   we return
2178  * an error code.   called at user priority.
2179  *
2180  * XXX missing a few commands (see the 7053 driver for ideas)
2181  */
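/*
 * a minimal sketch of a user-level caller, reading one sector through
 * the raw device.  this assumes the command is exposed via an ioctl
 * such as DIOSXDCMD (as with the sibling xd driver); the actual ioctl
 * name and header are not defined in this file:
 *
 *	struct xd_iocmd xio;
 *	char buf[XYFM_BPS];
 *
 *	memset(&xio, 0, sizeof(xio));
 *	xio.cmd = XYCMD_RD;
 *	xio.block = 0;
 *	xio.sectcnt = 1;
 *	xio.dlen = XYFM_BPS;		(must equal sectcnt * XYFM_BPS)
 *	xio.dptr = buf;
 *	if (ioctl(fd, DIOSXDCMD, &xio) == -1)
 *		err(1, "DIOSXDCMD");
 */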
2182 int
2183 xyc_ioctlcmd(xy, dev, xio)
2184 	struct xy_softc *xy;
2185 	dev_t   dev;
2186 	struct xd_iocmd *xio;
2187 
2188 {
2189 	int     s, rqno, dummy = 0;
2190 	caddr_t dvmabuf = NULL, buf = NULL;
2191 	struct xyc_softc *xycsc;
2192 	int			rseg, error;
2193 	bus_dma_segment_t	seg;
2194 
2195 	/* check sanity of requested command */
2196 
2197 	switch (xio->cmd) {
2198 
2199 	case XYCMD_NOP:	/* no op: everything should be zero */
2200 		if (xio->subfn || xio->dptr || xio->dlen ||
2201 		    xio->block || xio->sectcnt)
2202 			return (EINVAL);
2203 		break;
2204 
2205 	case XYCMD_RD:		/* read / write sectors (up to XD_IOCMD_MAXS) */
2206 	case XYCMD_WR:
2207 		if (xio->subfn || xio->sectcnt > XD_IOCMD_MAXS ||
2208 		    xio->sectcnt * XYFM_BPS != xio->dlen || xio->dptr == NULL)
2209 			return (EINVAL);
2210 		break;
2211 
2212 	case XYCMD_SK:		/* seek: doesn't seem useful to export this */
2213 		return (EINVAL);
2216 
2217 	default:
2218 		return (EINVAL);	/* unknown command */
2219 	}
2220 
2221 	xycsc = xy->parent;
2222 
2223 	/* create DVMA buffer for request if needed */
2224 	if (xio->dlen) {
2225 		if ((error = xy_dmamem_alloc(xycsc->dmatag, xycsc->auxmap,
2226 					     &seg, &rseg,
2227 					     xio->dlen, &buf,
2228 					     (bus_addr_t *)&dvmabuf)) != 0) {
2229 			return (error);
2230 		}
2231 
2232 		if (xio->cmd == XYCMD_WR) {
2233 			if ((error = copyin(xio->dptr, buf, xio->dlen)) != 0) {
2234 				bus_dmamem_unmap(xycsc->dmatag, buf, xio->dlen);
2235 				bus_dmamem_free(xycsc->dmatag, &seg, rseg);
2236 				return (error);
2237 			}
2238 		}
2239 	}
2240 	/* do it! */
2241 
2242 	error = 0;
2243 	s = splbio();
2244 	rqno = xyc_cmd(xycsc, xio->cmd, xio->subfn, xy->xy_drive, xio->block,
2245 	    xio->sectcnt, dvmabuf, XY_SUB_WAIT);
2246 	if (rqno == XY_ERR_FAIL) {
2247 		error = EIO;
2248 		goto done;
2249 	}
2250 	xio->errno = xycsc->ciorq->errno;
2251 	xio->tries = xycsc->ciorq->tries;
2252 	XYC_DONE(xycsc, dummy);
2253 
2254 	if (xio->cmd == XYCMD_RD)
2255 		error = copyout(buf, xio->dptr, xio->dlen);
2256 
2257 done:
2258 	splx(s);
2259 	if (dvmabuf) {
2260 		xy_dmamem_free(xycsc->dmatag, xycsc->auxmap, &seg, rseg,
2261 				xio->dlen, buf);
2262 	}
2263 	return (error);
2264 }
2265 
2266 /*
2267  * xyc_e2str: convert error code number into an error string
2268  */
2269 char *
2270 xyc_e2str(no)
2271 	int     no;
2272 {
2273 	switch (no) {
2274 	case XY_ERR_FAIL:
2275 		return ("Software fatal error");
2276 	case XY_ERR_DERR:
2277 		return ("DOUBLE ERROR");
2278 	case XY_ERR_AOK:
2279 		return ("Successful completion");
2280 	case XY_ERR_IPEN:
2281 		return("Interrupt pending");
2282 	case XY_ERR_BCFL:
2283 		return("Busy conflict");
2284 	case XY_ERR_TIMO:
2285 		return("Operation timeout");
2286 	case XY_ERR_NHDR:
2287 		return("Header not found");
2288 	case XY_ERR_HARD:
2289 		return("Hard ECC error");
2290 	case XY_ERR_ICYL:
2291 		return("Illegal cylinder address");
2292 	case XY_ERR_ISEC:
2293 		return("Illegal sector address");
2294 	case XY_ERR_SMAL:
2295 		return("Last sector too small");
2296 	case XY_ERR_SACK:
2297 		return("Slave ACK error (non-existent memory)");
2298 	case XY_ERR_CHER:
2299 		return("Cylinder and head/header error");
2300 	case XY_ERR_SRTR:
2301 		return("Auto-seek retry successful");
2302 	case XY_ERR_WPRO:
2303 		return("Write-protect error");
2304 	case XY_ERR_UIMP:
2305 		return("Unimplemented command");
2306 	case XY_ERR_DNRY:
2307 		return("Drive not ready");
2308 	case XY_ERR_SZER:
2309 		return("Sector count zero");
2310 	case XY_ERR_DFLT:
2311 		return("Drive faulted");
2312 	case XY_ERR_ISSZ:
2313 		return("Illegal sector size");
2314 	case XY_ERR_SLTA:
2315 		return("Self test A");
2316 	case XY_ERR_SLTB:
2317 		return("Self test B");
2318 	case XY_ERR_SLTC:
2319 		return("Self test C");
2320 	case XY_ERR_SOFT:
2321 		return("Soft ECC error");
2322 	case XY_ERR_SFOK:
2323 		return("Soft ECC error recovered");
2324 	case XY_ERR_IHED:
2325 		return("Illegal head");
2326 	case XY_ERR_DSEQ:
2327 		return("Disk sequencer error");
2328 	case XY_ERR_SEEK:
2329 		return("Seek error");
2330 	default:
2331 		return ("Unknown error");
2332 	}
2333 }
2334 
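/*
 * xyc_entoact: convert an error number into an error action class
 * (XY_ERA_PROG, XY_ERA_HARD, XY_ERA_RSET, XY_ERA_SOFT, or XY_ERA_WPRO)
 */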
2335 int
2336 xyc_entoact(errno)
2338 	int     errno;
2339 
2340 {
2341   switch (errno) {
2342     case XY_ERR_FAIL:	case XY_ERR_DERR:	case XY_ERR_IPEN:
2343     case XY_ERR_BCFL:	case XY_ERR_ICYL:	case XY_ERR_ISEC:
2344     case XY_ERR_UIMP:	case XY_ERR_SZER:	case XY_ERR_ISSZ:
2345     case XY_ERR_SLTA:	case XY_ERR_SLTB:	case XY_ERR_SLTC:
2346     case XY_ERR_IHED:	case XY_ERR_SACK:	case XY_ERR_SMAL:
2347 
2348 	return(XY_ERA_PROG); /* program error ! */
2349 
2350     case XY_ERR_TIMO:	case XY_ERR_NHDR:	case XY_ERR_HARD:
2351     case XY_ERR_DNRY:	case XY_ERR_CHER:	case XY_ERR_SEEK:
2352     case XY_ERR_SOFT:
2353 
2354 	return(XY_ERA_HARD); /* hard error, retry */
2355 
2356     case XY_ERR_DFLT:	case XY_ERR_DSEQ:
2357 
2358 	return(XY_ERA_RSET); /* hard error reset */
2359 
2360     case XY_ERR_SRTR:	case XY_ERR_SFOK:	case XY_ERR_AOK:
2361 
2362 	return(XY_ERA_SOFT); /* an FYI error */
2363 
2364     case XY_ERR_WPRO:
2365 
2366 	return(XY_ERA_WPRO); /* write protect */
2367   }
2368 
2369   return(XY_ERA_PROG); /* ??? */
2370 }
2371