/*	$NetBSD: ld_iop.c,v 1.10 2001/11/13 12:24:59 lukem Exp $	*/

/*-
 * Copyright (c) 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * I2O front-end for ld(4) driver, supporting random block storage class
 * devices.  Currently, this doesn't handle anything more complex than
 * fixed direct-access devices.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ld_iop.c,v 1.10 2001/11/13 12:24:59 lukem Exp $");

#include "opt_i2o.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/dkio.h>
#include <sys/disk.h>
#include <sys/proc.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <machine/bus.h>

#include <dev/ldvar.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopvar.h>

#define	LD_IOP_TIMEOUT		(30*1000)	/* command timeout, in ms */

#define	LD_IOP_CLAIMED		0x01	/* we hold a claim on the device */
#define	LD_IOP_NEW_EVTMASK	0x02	/* event mask change acknowledged */

struct ld_iop_softc {
	struct	ld_softc sc_ld;
	struct	iop_initiator sc_ii;
	struct	iop_initiator sc_eventii;
	int	sc_flags;
};

static void	ld_iop_adjqparam(struct device *, int);
static void	ld_iop_attach(struct device *, struct device *, void *);
static int	ld_iop_detach(struct device *, int);
static int	ld_iop_dump(struct ld_softc *, void *, int, int);
static int	ld_iop_flush(struct ld_softc *);
static void	ld_iop_intr(struct device *, struct iop_msg *, void *);
static void	ld_iop_intr_event(struct device *, struct iop_msg *, void *);
static int	ld_iop_match(struct device *, struct cfdata *, void *);
static int	ld_iop_start(struct ld_softc *, struct buf *);
static void	ld_iop_unconfig(struct ld_iop_softc *, int);

struct cfattach ld_iop_ca = {
	sizeof(struct ld_iop_softc),
	ld_iop_match,
	ld_iop_attach,
	ld_iop_detach
};

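/*
 * Textual descriptions of the RBS detailed status codes, indexed by the
 * detail code returned in the reply (see ld_iop_intr()).
 */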
#ifdef I2OVERBOSE
static const char * const ld_iop_errors[] = {
	"success",
	"media error",
	"access error",
	"device failure",
	"device not ready",
	"media not present",
	"media locked",
	"media failure",
	"protocol failure",
	"bus failure",
	"access violation",
	"media write protected",
	"device reset",
	"volume changed, waiting for acknowledgement",
	"timeout",
};
#endif

static int
ld_iop_match(struct device *parent, struct cfdata *match, void *aux)
{
	struct iop_attach_args *ia;

	ia = aux;

	return (ia->ia_class == I2O_CLASS_RANDOM_BLOCK_STORAGE);
}

static void
ld_iop_attach(struct device *parent, struct device *self, void *aux)
{
	struct iop_attach_args *ia;
	struct ld_softc *ld;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int rv, evreg, enable;
	char *typestr, *fixedstr;
	u_int cachesz;
	u_int32_t timeoutbase, rwvtimeoutbase, rwvtimeout;
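	/*
	 * Reply buffer for the parameter group reads below; the layout
	 * must match the wire format exactly, hence the packed attribute.
	 */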
	struct {
		struct	i2o_param_op_results pr;
		struct	i2o_param_read_results prr;
		union {
			struct	i2o_param_rbs_cache_control cc;
			struct	i2o_param_rbs_device_info bdi;
		} p;
	} __attribute__ ((__packed__)) param;

	sc = (struct ld_iop_softc *)self;
	ld = &sc->sc_ld;
	iop = (struct iop_softc *)parent;
	ia = (struct iop_attach_args *)aux;
	evreg = 0;

	/* Register us as an initiator. */
	sc->sc_ii.ii_dv = self;
	sc->sc_ii.ii_intr = ld_iop_intr;
	sc->sc_ii.ii_adjqparam = ld_iop_adjqparam;
	sc->sc_ii.ii_flags = 0;
	sc->sc_ii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_ii);

	/* Register another initiator to handle events from the device. */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = ld_iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = ia->ia_tid;
	iop_initiator_register(iop, &sc->sc_eventii);

	rv = iop_util_eventreg(iop, &sc->sc_eventii,
	    I2O_EVENT_GEN_EVENT_MASK_MODIFIED |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		printf("%s: unable to register for events\n", self->dv_xname);
		goto bad;
	}
	evreg = 1;

	/*
	 * Start out with one queued command.  The `iop' driver will adjust
	 * the queue parameters once we're up and running.
	 */
	ld->sc_maxqueuecnt = 1;

	ld->sc_maxxfer = IOP_MAX_XFER;
	ld->sc_dump = ld_iop_dump;
	ld->sc_flush = ld_iop_flush;
	ld->sc_start = ld_iop_start;

	/* Say what the device is. */
	printf(":");
	iop_print_ident(iop, ia->ia_tid);

	/*
	 * Claim the device so that we don't get any nasty surprises.  Allow
	 * failure.
	 */
	rv = iop_util_claim(iop, &sc->sc_ii, 0,
	    I2O_UTIL_CLAIM_CAPACITY_SENSITIVE |
	    I2O_UTIL_CLAIM_NO_PEER_SERVICE |
	    I2O_UTIL_CLAIM_NO_MANAGEMENT_SERVICE |
	    I2O_UTIL_CLAIM_PRIMARY_USER);
	sc->sc_flags = rv ? 0 : LD_IOP_CLAIMED;

	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_RBS_DEVICE_INFO,
	    &param, sizeof(param), NULL);
	if (rv != 0)
		goto bad;

	ld->sc_secsize = le32toh(param.p.bdi.blocksize);
	ld->sc_secperunit = (int)
	    (le64toh(param.p.bdi.capacity) / ld->sc_secsize);

	switch (param.p.bdi.type) {
	case I2O_RBS_TYPE_DIRECT:
		typestr = "direct access";
		enable = 1;
		break;
	case I2O_RBS_TYPE_WORM:
		typestr = "WORM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_CDROM:
		typestr = "CD-ROM";
		enable = 0;
		break;
	case I2O_RBS_TYPE_OPTICAL:
		typestr = "optical";
		enable = 0;
		break;
	default:
		typestr = "unknown";
		enable = 0;
		break;
	}

	if ((le32toh(param.p.bdi.capabilities) & I2O_RBS_CAP_REMOVEABLE_MEDIA)
	    != 0) {
		/* ld->sc_flags = LDF_REMOVEABLE; */
		fixedstr = "removable";
		enable = 0;
	} else
		fixedstr = "fixed";

	printf(" %s, %s", typestr, fixedstr);

	/*
	 * Determine if the device has a private cache.  If so, print the
	 * cache size.  Even if the device doesn't appear to have a cache,
	 * we perform a flush at shutdown.
	 */
	rv = iop_field_get_all(iop, ia->ia_tid, I2O_PARAM_RBS_CACHE_CONTROL,
	    &param, sizeof(param), NULL);
	if (rv != 0)
		goto bad;

	if ((cachesz = le32toh(param.p.cc.totalcachesize)) != 0)
		printf(", %dkB cache", cachesz >> 10);

	printf("\n");

	/*
	 * Configure the DDM's timeout functions to time out all commands
	 * after 30 seconds.
	 */
	timeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	rwvtimeoutbase = htole32(LD_IOP_TIMEOUT * 1000);
	rwvtimeout = 0;

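	/*
	 * Note that the return values are not checked: failure here is
	 * not fatal, as the DDM simply retains its default timeouts.
	 */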
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &timeoutbase, sizeof(timeoutbase),
	    I2O_PARAM_RBS_OPERATION_timeoutbase);
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &rwvtimeoutbase, sizeof(rwvtimeoutbase),
	    I2O_PARAM_RBS_OPERATION_rwvtimeoutbase);
	iop_field_set(iop, ia->ia_tid, I2O_PARAM_RBS_OPERATION,
	    &rwvtimeout, sizeof(rwvtimeout),
	    I2O_PARAM_RBS_OPERATION_rwvtimeout);

	if (enable)
		ld->sc_flags |= LDF_ENABLED;
	else
		printf("%s: device not yet supported\n", self->dv_xname);

	ldattach(ld);
	return;

 bad:
	ld_iop_unconfig(sc, evreg);
}

static void
ld_iop_unconfig(struct ld_iop_softc *sc, int evreg)
{
	struct iop_softc *iop;
	int s;

	iop = (struct iop_softc *)sc->sc_ld.sc_dv.dv_parent;

	if ((sc->sc_flags & LD_IOP_CLAIMED) != 0)
		iop_util_claim(iop, &sc->sc_ii, 1,
		    I2O_UTIL_CLAIM_PRIMARY_USER);

	if (evreg) {
		/*
		 * Mask off events, and wait up to 5 seconds for a reply.
		 * Note that some adapters won't reply to this (XXX We
		 * should check the event capabilities).
		 */
		sc->sc_flags &= ~LD_IOP_NEW_EVTMASK;
		iop_util_eventreg(iop, &sc->sc_eventii,
		    I2O_EVENT_GEN_EVENT_MASK_MODIFIED);
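		/*
		 * Check the flag and sleep at splbio() so that the
		 * wakeup() from ld_iop_intr_event() can't slip in between
		 * the test and the tsleep().
		 */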
		s = splbio();
		if ((sc->sc_flags & LD_IOP_NEW_EVTMASK) == 0)
			tsleep(&sc->sc_eventii, PRIBIO, "ld_iopevt", hz * 5);
		splx(s);
#ifdef I2ODEBUG
		if ((sc->sc_flags & LD_IOP_NEW_EVTMASK) == 0)
			printf("%s: didn't reply to event unregister\n",
			    sc->sc_ld.sc_dv.dv_xname);
#endif
	}

	iop_initiator_unregister(iop, &sc->sc_eventii);
	iop_initiator_unregister(iop, &sc->sc_ii);
}

static int
ld_iop_detach(struct device *self, int flags)
{
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int rv;

	sc = (struct ld_iop_softc *)self;
	iop = (struct iop_softc *)self->dv_parent;

	if ((rv = ldbegindetach(&sc->sc_ld, flags)) != 0)
		return (rv);

	/*
	 * Abort any requests queued with the IOP, but allow requests that
	 * are already in progress to complete.
	 */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		iop_util_abort(iop, &sc->sc_ii, 0, 0,
		    I2O_UTIL_ABORT_WILD | I2O_UTIL_ABORT_CLEAN);

	ldenddetach(&sc->sc_ld);

	/* Un-claim the target, and un-register our initiators. */
	if ((sc->sc_ld.sc_flags & LDF_ENABLED) != 0)
		ld_iop_unconfig(sc, 1);

	return (0);
}

static int
ld_iop_start(struct ld_softc *ld, struct buf *bp)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_read *mf;
	u_int rv, flags, write;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)ld->sc_dv.dv_parent;

	im = iop_msg_alloc(iop, 0);
	im->im_dvcontext = bp;

	write = ((bp->b_flags & B_READ) == 0);
	ba = (u_int64_t)bp->b_rawblkno * ld->sc_secsize;

	/*
	 * Write through the cache when performing synchronous writes.  When
	 * performing a read, we don't request that the DDM cache the data,
	 * as there's little advantage to it.
	 */
	if (write) {
		if ((bp->b_flags & B_ASYNC) == 0)
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WT;
		else
			flags = I2O_RBS_BLOCK_WRITE_CACHE_WB;
	} else
		flags = 0;

	/*
	 * Fill the message frame.  We can use the block_read structure for
	 * both reads and writes, as it's almost identical to the
	 * block_write structure.
	 */
	mf = (struct i2o_rbs_block_read *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_read);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid,
	    write ? I2O_RBS_BLOCK_WRITE : I2O_RBS_BLOCK_READ);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
	mf->flags = flags | (1 << 16);		/* flags & time multiplier */
	mf->datasize = bp->b_bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);

	/* Map the data transfer and enqueue the command. */
	rv = iop_msg_map_bio(iop, im, mb, bp->b_data, bp->b_bcount, write);
	if (rv == 0) {
		if ((rv = iop_post(iop, mb)) != 0) {
			iop_msg_unmap(iop, im);
			iop_msg_free(iop, im);
		}
	} else
		iop_msg_free(iop, im);
	return (rv);
}

static int
ld_iop_dump(struct ld_softc *ld, void *data, int blkno, int blkcnt)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_block_write *mf;
	int rv, bcount;
	u_int64_t ba;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)ld->sc_dv.dv_parent;
	bcount = blkcnt * ld->sc_secsize;
	ba = (u_int64_t)blkno * ld->sc_secsize;
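	/*
	 * Use a polled message: dumps are typically taken after a panic,
	 * so we can't rely on interrupts or sleep waiting for the reply.
	 */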
	im = iop_msg_alloc(iop, IM_POLL);

	mf = (struct i2o_rbs_block_write *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_rbs_block_write);
	mf->msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_BLOCK_WRITE);
	mf->msgictx = sc->sc_ii.ii_ictx;
	mf->msgtctx = im->im_tctx;
	mf->flags = I2O_RBS_BLOCK_WRITE_CACHE_WT | (1 << 16);	/* WT & time multiplier */
	mf->datasize = bcount;
	mf->lowoffset = (u_int32_t)ba;
	mf->highoffset = (u_int32_t)(ba >> 32);

	if ((rv = iop_msg_map(iop, im, mb, data, bcount, 1, NULL)) != 0) {
		iop_msg_free(iop, im);
		return (rv);
	}

	rv = iop_msg_post(iop, im, mb, LD_IOP_TIMEOUT * 2);
	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);
	return (rv);
}

static int
ld_iop_flush(struct ld_softc *ld)
{
	struct iop_msg *im;
	struct iop_softc *iop;
	struct ld_iop_softc *sc;
	struct i2o_rbs_cache_flush mf;
	int rv;

	sc = (struct ld_iop_softc *)ld;
	iop = (struct iop_softc *)ld->sc_dv.dv_parent;
	im = iop_msg_alloc(iop, IM_WAIT);

	mf.msgflags = I2O_MSGFLAGS(i2o_rbs_cache_flush);
	mf.msgfunc = I2O_MSGFUNC(sc->sc_ii.ii_tid, I2O_RBS_CACHE_FLUSH);
	mf.msgictx = sc->sc_ii.ii_ictx;
	mf.msgtctx = im->im_tctx;
	mf.flags = 1 << 16;			/* time multiplier */

	/* XXX Ancient disks will return an error here. */
	rv = iop_msg_post(iop, im, &mf, LD_IOP_TIMEOUT * 2);
	iop_msg_free(iop, im);
	return (rv);
}

static void
ld_iop_intr(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_rbs_reply *rb;
	struct buf *bp;
	struct ld_iop_softc *sc;
	struct iop_softc *iop;
	int err, detail;
#ifdef I2OVERBOSE
	const char *errstr;
#endif

	rb = reply;
	bp = im->im_dvcontext;
	sc = (struct ld_iop_softc *)dv;
	iop = (struct iop_softc *)dv->dv_parent;

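	/*
	 * A set FAIL bit signals a transport-level failure; otherwise,
	 * the request status covers device-level errors.
	 */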
	err = ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0);

	if (!err && rb->reqstatus != I2O_STATUS_SUCCESS) {
		detail = le16toh(rb->detail);
#ifdef I2OVERBOSE
		if (detail >= sizeof(ld_iop_errors) / sizeof(ld_iop_errors[0]))
			errstr = "<unknown>";
		else
			errstr = ld_iop_errors[detail];
		printf("%s: error 0x%04x: %s\n", dv->dv_xname, detail, errstr);
#else
		printf("%s: error 0x%04x\n", dv->dv_xname, detail);
#endif
		err = 1;
	}

	if (err) {
		bp->b_flags |= B_ERROR;
		bp->b_error = EIO;
		bp->b_resid = bp->b_bcount;
	} else
		bp->b_resid = bp->b_bcount - le32toh(rb->transfercount);

	iop_msg_unmap(iop, im);
	iop_msg_free(iop, im);
	lddone(&sc->sc_ld, bp);
}

static void
ld_iop_intr_event(struct device *dv, struct iop_msg *im, void *reply)
{
	struct i2o_util_event_register_reply *rb;
	struct ld_iop_softc *sc;
	u_int event;

	rb = reply;

	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
		return;

	event = le32toh(rb->event);
	sc = (struct ld_iop_softc *)dv;

	if (event == I2O_EVENT_GEN_EVENT_MASK_MODIFIED) {
		sc->sc_flags |= LD_IOP_NEW_EVTMASK;
		wakeup(&sc->sc_eventii);
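		/*
		 * When built with I2ODEBUG, fall through so that the
		 * event is also logged below.
		 */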
#ifndef I2ODEBUG
		return;
#endif
	}

	printf("%s: event 0x%08x received\n", dv->dv_xname, event);
}

static void
ld_iop_adjqparam(struct device *dv, int mpi)
{
	struct iop_softc *iop;

	/*
	 * AMI controllers seem to lose the plot if you hand off lots of
	 * queued commands.
	 */
	iop = (struct iop_softc *)dv->dv_parent;
	if (le16toh(I2O_ORG_AMI) == iop->sc_status.orgid && mpi > 64)
		mpi = 64;

	ldadjqparam((struct ld_softc *)dv, mpi);
}