1 /*	$NetBSD: iop.c,v 1.92 2021/08/07 16:19:11 thorpej Exp $	*/
2 
3 /*-
4  * Copyright (c) 2000, 2001, 2002, 2007 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Andrew Doran.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /*
33  * Support for I2O IOPs (intelligent I/O processors).
34  */
35 
36 #include <sys/cdefs.h>
37 __KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.92 2021/08/07 16:19:11 thorpej Exp $");
38 
39 #include "iop.h"
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/device.h>
45 #include <sys/queue.h>
46 #include <sys/proc.h>
47 #include <sys/malloc.h>
48 #include <sys/ioctl.h>
49 #include <sys/endian.h>
50 #include <sys/conf.h>
51 #include <sys/kthread.h>
52 #include <sys/kauth.h>
53 #include <sys/bus.h>
54 
55 #include <dev/i2o/i2o.h>
56 #include <dev/i2o/iopio.h>
57 #include <dev/i2o/iopreg.h>
58 #include <dev/i2o/iopvar.h>
59 
60 #include "ioconf.h"
61 #include "locators.h"
62 
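/*
 * Busy-wait for up to `ms' milliseconds, re-testing `cond' every 100us.
 * Note that POLL() falls through on timeout; callers must re-check the
 * condition afterwards to distinguish success from timeout.
 */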
63 #define POLL(ms, cond)				\
64 do {						\
65 	int xi;					\
66 	for (xi = (ms) * 10; xi; xi--) {	\
67 		if (cond)			\
68 			break;			\
69 		DELAY(100);			\
70 	}					\
71 } while (/* CONSTCOND */0);
72 
73 #ifdef I2ODEBUG
74 #define DPRINTF(x)	printf x
75 #else
76 #define	DPRINTF(x)
77 #endif
78 
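/*
 * Initiator contexts are hashed so that iop_handle_reply() can quickly
 * map the ictx value in a reply frame back to its initiator.
 */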
79 #define IOP_ICTXHASH_NBUCKETS	16
80 #define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])
81 
82 #define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)
83 
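/*
 * Transaction contexts: the low IOP_TCTX_SHIFT bits index the message
 * wrapper array; the bits above them hold a generation number used to
 * detect stale or corrupt replies.
 */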
84 #define	IOP_TCTX_SHIFT	12
85 #define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)
86 
87 static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
88 static u_long	iop_ictxhash;
89 static void	*iop_sdh;
90 static struct	i2o_systab *iop_systab;
91 static int	iop_systab_size;
92 
93 dev_type_open(iopopen);
94 dev_type_close(iopclose);
95 dev_type_ioctl(iopioctl);
96 
97 const struct cdevsw iop_cdevsw = {
98 	.d_open = iopopen,
99 	.d_close = iopclose,
100 	.d_read = noread,
101 	.d_write = nowrite,
102 	.d_ioctl = iopioctl,
103 	.d_stop = nostop,
104 	.d_tty = notty,
105 	.d_poll = nopoll,
106 	.d_mmap = nommap,
107 	.d_kqfilter = nokqfilter,
108 	.d_discard = nodiscard,
109 	.d_flag = D_OTHER,
110 };
111 
112 #define	IC_CONFIGURE	0x01
113 #define	IC_PRIORITY	0x02
114 
115 static struct iop_class {
116 	u_short	ic_class;
117 	u_short	ic_flags;
118 	const char *ic_caption;
119 } const iop_class[] = {
120 	{
121 		I2O_CLASS_EXECUTIVE,
122 		0,
123 		"executive"
124 	},
125 	{
126 		I2O_CLASS_DDM,
127 		0,
128 		"device driver module"
129 	},
130 	{
131 		I2O_CLASS_RANDOM_BLOCK_STORAGE,
132 		IC_CONFIGURE | IC_PRIORITY,
133 		"random block storage"
134 	},
135 	{
136 		I2O_CLASS_SEQUENTIAL_STORAGE,
137 		IC_CONFIGURE | IC_PRIORITY,
138 		"sequential storage"
139 	},
140 	{
141 		I2O_CLASS_LAN,
142 		IC_CONFIGURE | IC_PRIORITY,
143 		"LAN port"
144 	},
145 	{
146 		I2O_CLASS_WAN,
147 		IC_CONFIGURE | IC_PRIORITY,
148 		"WAN port"
149 	},
150 	{
151 		I2O_CLASS_FIBRE_CHANNEL_PORT,
152 		IC_CONFIGURE,
153 		"fibrechannel port"
154 	},
155 	{
156 		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
157 		0,
158 		"fibrechannel peripheral"
159 	},
160  	{
161  		I2O_CLASS_SCSI_PERIPHERAL,
162  		0,
163  		"SCSI peripheral"
164  	},
165 	{
166 		I2O_CLASS_ATE_PORT,
167 		IC_CONFIGURE,
168 		"ATE port"
169 	},
170 	{
171 		I2O_CLASS_ATE_PERIPHERAL,
172 		0,
173 		"ATE peripheral"
174 	},
175 	{
176 		I2O_CLASS_FLOPPY_CONTROLLER,
177 		IC_CONFIGURE,
178 		"floppy controller"
179 	},
180 	{
181 		I2O_CLASS_FLOPPY_DEVICE,
182 		0,
183 		"floppy device"
184 	},
185 	{
186 		I2O_CLASS_BUS_ADAPTER_PORT,
187 		IC_CONFIGURE,
188 		"bus adapter port"
189 	},
190 };
191 
192 #ifdef I2ODEBUG
193 static const char * const iop_status[] = {
194 	"success",
195 	"abort (dirty)",
196 	"abort (no data transfer)",
197 	"abort (partial transfer)",
198 	"error (dirty)",
199 	"error (no data transfer)",
200 	"error (partial transfer)",
201 	"undefined error code",
202 	"process abort (dirty)",
203 	"process abort (no data transfer)",
204 	"process abort (partial transfer)",
205 	"transaction error",
206 };
207 #endif
208 
209 static inline u_int32_t	iop_inl(struct iop_softc *, int);
210 static inline void	iop_outl(struct iop_softc *, int, u_int32_t);
211 
212 static inline u_int32_t	iop_inl_msg(struct iop_softc *, int);
213 static inline void	iop_outl_msg(struct iop_softc *, int, u_int32_t);
214 
215 static void	iop_config_interrupts(device_t);
216 static void	iop_configure_devices(struct iop_softc *, int, int);
217 static void	iop_devinfo(int, char *, size_t);
218 static int	iop_print(void *, const char *);
219 static void	iop_shutdown(void *);
220 
221 static void	iop_adjqparam(struct iop_softc *, int);
222 static int	iop_handle_reply(struct iop_softc *, u_int32_t);
223 static int	iop_hrt_get(struct iop_softc *);
224 static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
225 static void	iop_intr_event(device_t, struct iop_msg *, void *);
226 static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
227 			     u_int32_t);
228 static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
229 static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
230 static int	iop_ofifo_init(struct iop_softc *);
231 static int	iop_passthrough(struct iop_softc *, struct ioppt *,
232 				struct proc *);
233 static void	iop_reconf_thread(void *);
234 static void	iop_release_mfa(struct iop_softc *, u_int32_t);
235 static int	iop_reset(struct iop_softc *);
236 static int	iop_sys_enable(struct iop_softc *);
237 static int	iop_systab_set(struct iop_softc *);
238 static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);
239 
240 #ifdef I2ODEBUG
241 static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
242 #endif
243 
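/*
 * Register accessors.  Each access is bracketed with an explicit
 * bus_space barrier so that reads and writes to the IOP's registers
 * are not reordered with respect to one another.
 */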
244 static inline u_int32_t
245 iop_inl(struct iop_softc *sc, int off)
246 {
247 
248 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
249 	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
250 	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
251 }
252 
253 static inline void
254 iop_outl(struct iop_softc *sc, int off, u_int32_t val)
255 {
256 
257 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
258 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
259 	    BUS_SPACE_BARRIER_WRITE);
260 }
261 
262 static inline u_int32_t
263 iop_inl_msg(struct iop_softc *sc, int off)
264 {
265 
266 	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
267 	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
268 	return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
269 }
270 
271 static inline void
272 iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
273 {
274 
275 	bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
276 	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
277 	    BUS_SPACE_BARRIER_WRITE);
278 }
279 
280 /*
281  * Initialise the IOP and our interface.
282  */
283 void
284 iop_init(struct iop_softc *sc, const char *intrstr)
285 {
286 	struct iop_msg *im;
287 	int rv, i, j, state, nsegs;
288 	u_int32_t mask;
289 	char ident[64];
290 
291 	state = 0;
292 
293 	printf("I2O adapter");
294 
295 	mutex_init(&sc->sc_intrlock, MUTEX_DEFAULT, IPL_VM);
296 	mutex_init(&sc->sc_conflock, MUTEX_DEFAULT, IPL_NONE);
297 	cv_init(&sc->sc_confcv, "iopconf");
298 
299 	if (iop_ictxhashtbl == NULL) {
300 		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
301 		    true, &iop_ictxhash);
302 	}
303 
304 	/* Disable interrupts at the IOP. */
305 	mask = iop_inl(sc, IOP_REG_INTR_MASK);
306 	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);
307 
308 	/* Allocate a scratch DMA map for small miscellaneous shared data. */
309 	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
310 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
311 		aprint_error_dev(sc->sc_dev, "cannot create scratch dmamap\n");
312 		return;
313 	}
314 
315 	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
316 	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
317 		aprint_error_dev(sc->sc_dev, "cannot alloc scratch dmamem\n");
318 		goto bail_out;
319 	}
320 	state++;
321 
322 	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
323 	    &sc->sc_scr, 0)) {
324 		aprint_error_dev(sc->sc_dev, "cannot map scratch dmamem\n");
325 		goto bail_out;
326 	}
327 	state++;
328 
329 	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
330 	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
331 		aprint_error_dev(sc->sc_dev, "cannot load scratch dmamap\n");
332 		goto bail_out;
333 	}
334 	state++;
335 
336 #ifdef I2ODEBUG
337 	/* So that our debug checks don't choke. */
338 	sc->sc_framesize = 128;
339 #endif
340 
341 	/* Avoid syncing the reply map until it's set up. */
342 	sc->sc_curib = 0x123;
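	/* (iop_ofifo_init() resets this to 0 once the reply area exists.) */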
343 
344 	/* Reset the adapter and request status. */
345  	if ((rv = iop_reset(sc)) != 0) {
346  		aprint_error_dev(sc->sc_dev, "not responding (reset)\n");
347 		goto bail_out;
348  	}
349 
350  	if ((rv = iop_status_get(sc, 1)) != 0) {
351 		aprint_error_dev(sc->sc_dev, "not responding (get status)\n");
352 		goto bail_out;
353  	}
354 
355 	sc->sc_flags |= IOP_HAVESTATUS;
356 	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
357 	    ident, sizeof(ident));
358 	printf(" <%s>\n", ident);
359 
360 #ifdef I2ODEBUG
361 	printf("%s: orgid=0x%04x version=%d\n",
362 	    device_xname(sc->sc_dev),
363 	    le16toh(sc->sc_status.orgid),
364 	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
365 	printf("%s: type want have cbase\n", device_xname(sc->sc_dev));
366 	printf("%s: mem  %04x %04x %08x\n", device_xname(sc->sc_dev),
367 	    le32toh(sc->sc_status.desiredprivmemsize),
368 	    le32toh(sc->sc_status.currentprivmemsize),
369 	    le32toh(sc->sc_status.currentprivmembase));
370 	printf("%s: i/o  %04x %04x %08x\n", device_xname(sc->sc_dev),
371 	    le32toh(sc->sc_status.desiredpriviosize),
372 	    le32toh(sc->sc_status.currentpriviosize),
373 	    le32toh(sc->sc_status.currentpriviobase));
374 #endif
375 
376 	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
377 	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
378 		sc->sc_maxob = IOP_MAX_OUTBOUND;
379 	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
380 	if (sc->sc_maxib > IOP_MAX_INBOUND)
381 		sc->sc_maxib = IOP_MAX_INBOUND;
382 	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
383 	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
384 		sc->sc_framesize = IOP_MAX_MSG_SIZE;
385 
386 #if defined(I2ODEBUG) || defined(DIAGNOSTIC)
387 	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
388 		aprint_error_dev(sc->sc_dev, "frame size too small (%d)\n",
389 		    sc->sc_framesize);
390 		goto bail_out;
391 	}
392 #endif
393 
394 	/* Allocate message wrappers. */
395 	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_WAITOK|M_ZERO);
396 	state++;
397 	sc->sc_ims = im;
398 	SLIST_INIT(&sc->sc_im_freelist);
399 
400 	for (i = 0; i < sc->sc_maxib; i++, im++) {
401 		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
402 		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
403 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
404 		    &im->im_xfer[0].ix_map);
405 		if (rv != 0) {
406 			aprint_error_dev(sc->sc_dev, "couldn't create dmamap (%d)\n", rv);
407 			goto bail_out3;
408 		}
409 
410 		im->im_tctx = i;
411 		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
412 		cv_init(&im->im_cv, "iopmsg");
413 	}
414 
415 	/* Initialise the IOP's outbound FIFO. */
416 	if (iop_ofifo_init(sc) != 0) {
417 		aprint_error_dev(sc->sc_dev, "unable to init outbound FIFO\n");
418 		goto bail_out3;
419 	}
420 
421 	/*
422  	 * Defer further configuration until (a) interrupts are working and
423  	 * (b) we have enough information to build the system table.
424  	 */
425 	config_interrupts(sc->sc_dev, iop_config_interrupts);
426 
427 	/* Configure shutdown hook before we start any device activity. */
428 	if (iop_sdh == NULL)
429 		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);
430 
431 	/* Ensure interrupts are enabled at the IOP. */
432 	mask = iop_inl(sc, IOP_REG_INTR_MASK);
433 	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);
434 
435 	if (intrstr != NULL)
436 		printf("%s: interrupting at %s\n", device_xname(sc->sc_dev),
437 		    intrstr);
438 
439 #ifdef I2ODEBUG
440 	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
441 	    device_xname(sc->sc_dev), sc->sc_maxib,
442 	    le32toh(sc->sc_status.maxinboundmframes),
443 	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
444 #endif
445 
446 	return;
447 
448  bail_out3:
449  	if (state > 3) {
450 		for (j = 0; j < i; j++)
451 			bus_dmamap_destroy(sc->sc_dmat,
452 			    sc->sc_ims[j].im_xfer[0].ix_map);
453 		free(sc->sc_ims, M_DEVBUF);
454 	}
455  bail_out:
456 	if (state > 2)
457 		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
458 	if (state > 1)
459 		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
460 	if (state > 0)
461 		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
462 	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
463 }
464 
465 /*
466  * Perform autoconfiguration tasks.
467  */
468 static void
469 iop_config_interrupts(device_t self)
470 {
471 	struct iop_attach_args ia;
472 	struct iop_softc *sc, *iop;
473 	struct i2o_systab_entry *ste;
474 	int rv, i, niop;
475 	int locs[IOPCF_NLOCS];
476 
477 	sc = device_private(self);
478 	mutex_enter(&sc->sc_conflock);
479 
480 	LIST_INIT(&sc->sc_iilist);
481 
482 	printf("%s: configuring...\n", device_xname(sc->sc_dev));
483 
484 	if (iop_hrt_get(sc) != 0) {
485 		printf("%s: unable to retrieve HRT\n", device_xname(sc->sc_dev));
486 		mutex_exit(&sc->sc_conflock);
487 		return;
488 	}
489 
490 	/*
491  	 * Build the system table.
492  	 */
493 	if (iop_systab == NULL) {
494 		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
495 			if ((iop = device_lookup_private(&iop_cd, i)) == NULL)
496 				continue;
497 			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
498 				continue;
499 			if (iop_status_get(iop, 1) != 0) {
500 				aprint_error_dev(sc->sc_dev, "unable to retrieve status\n");
501 				iop->sc_flags &= ~IOP_HAVESTATUS;
502 				continue;
503 			}
504 			niop++;
505 		}
506 		if (niop == 0) {
507 			mutex_exit(&sc->sc_conflock);
508 			return;
509 		}
510 
511 		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
512 		    sizeof(struct i2o_systab);
513 		iop_systab_size = i;
514 		iop_systab = malloc(i, M_DEVBUF, M_WAITOK|M_ZERO);
515 		iop_systab->numentries = niop;
516 		iop_systab->version = I2O_VERSION_11;
517 
518 		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
519 			if ((iop = device_lookup_private(&iop_cd, i)) == NULL)
520 				continue;
521 			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
522 				continue;
523 
524 			ste->orgid = iop->sc_status.orgid;
525 			ste->iopid = device_unit(iop->sc_dev) + 2;
526 			ste->segnumber =
527 			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
528 			ste->iopcaps = iop->sc_status.iopcaps;
529 			ste->inboundmsgframesize =
530 			    iop->sc_status.inboundmframesize;
531 			ste->inboundmsgportaddresslow =
532 			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
533 			ste++;
534 		}
535 	}
536 
537 	/*
538 	 * Post the system table to the IOP and bring it to the OPERATIONAL
539 	 * state.
540 	 */
541 	if (iop_systab_set(sc) != 0) {
542 		aprint_error_dev(sc->sc_dev, "unable to set system table\n");
543 		mutex_exit(&sc->sc_conflock);
544 		return;
545 	}
546 	if (iop_sys_enable(sc) != 0) {
547 		aprint_error_dev(sc->sc_dev, "unable to enable system\n");
548 		mutex_exit(&sc->sc_conflock);
549 		return;
550 	}
551 
552 	/*
553 	 * Set up an event handler for this IOP.
554 	 */
555 	sc->sc_eventii.ii_dv = self;
556 	sc->sc_eventii.ii_intr = iop_intr_event;
557 	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
558 	sc->sc_eventii.ii_tid = I2O_TID_IOP;
559 	iop_initiator_register(sc, &sc->sc_eventii);
560 
561 	rv = iop_util_eventreg(sc, &sc->sc_eventii,
562 	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
563 	    I2O_EVENT_EXEC_CONNECTION_FAIL |
564 	    I2O_EVENT_EXEC_ADAPTER_FAULT |
565 	    I2O_EVENT_EXEC_POWER_FAIL |
566 	    I2O_EVENT_EXEC_RESET_PENDING |
567 	    I2O_EVENT_EXEC_RESET_IMMINENT |
568 	    I2O_EVENT_EXEC_HARDWARE_FAIL |
569 	    I2O_EVENT_EXEC_XCT_CHANGE |
570 	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
571 	    I2O_EVENT_GEN_DEVICE_RESET |
572 	    I2O_EVENT_GEN_STATE_CHANGE |
573 	    I2O_EVENT_GEN_GENERAL_WARNING);
574 	if (rv != 0) {
575 		aprint_error_dev(sc->sc_dev, "unable to register for events\n");
576 		mutex_exit(&sc->sc_conflock);
577 		return;
578 	}
579 
580 	/*
581 	 * Attempt to match and attach a product-specific extension.
582 	 */
583 	ia.ia_class = I2O_CLASS_ANY;
584 	ia.ia_tid = I2O_TID_IOP;
585 	locs[IOPCF_TID] = I2O_TID_IOP;
586 	config_found(self, &ia, iop_print,
587 	    CFARGS(.submatch = config_stdsubmatch,
588 		   .locators = locs));
589 
590 	/*
591 	 * Start device configuration.
592 	 */
593 	if ((rv = iop_reconfigure(sc, 0)) == -1)
594 		aprint_error_dev(sc->sc_dev, "configure failed (%d)\n", rv);
595 
596 
597 	sc->sc_flags |= IOP_ONLINE;
598 	rv = kthread_create(PRI_NONE, 0, NULL, iop_reconf_thread, sc,
599 	    &sc->sc_reconf_thread, "%s", device_xname(sc->sc_dev));
600 	mutex_exit(&sc->sc_conflock);
601  	if (rv != 0) {
602 		aprint_error_dev(sc->sc_dev, "unable to create reconfiguration thread (%d)\n", rv);
603  		return;
604  	}
605 }
606 
607 /*
608  * Reconfiguration thread; listens for LCT change notification, and
609  * initiates re-configuration if received.
610  */
611 static void
612 iop_reconf_thread(void *cookie)
613 {
614 	struct iop_softc *sc;
615 	struct i2o_lct lct;
616 	u_int32_t chgind;
617 	int rv;
618 
619 	sc = cookie;
620 	chgind = sc->sc_chgind + 1;
621 
622 	for (;;) {
623 		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
624 		    device_xname(sc->sc_dev), chgind));
625 
626 		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);
627 
628 		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
629 		    device_xname(sc->sc_dev), le32toh(lct.changeindicator), rv));
630 
631 		mutex_enter(&sc->sc_conflock);
632 		if (rv == 0) {
633 			iop_reconfigure(sc, le32toh(lct.changeindicator));
634 			chgind = sc->sc_chgind + 1;
635 		}
636 		(void)cv_timedwait(&sc->sc_confcv, &sc->sc_conflock, hz * 5);
637 		mutex_exit(&sc->sc_conflock);
638 	}
639 }
640 
641 /*
642  * Reconfigure: find new and removed devices.
643  */
644 int
645 iop_reconfigure(struct iop_softc *sc, u_int chgind)
646 {
647 	struct iop_msg *im;
648 	struct i2o_hba_bus_scan mf;
649 	struct i2o_lct_entry *le;
650 	struct iop_initiator *ii, *nextii;
651 	int rv, tid, i;
652 
653 	KASSERT(mutex_owned(&sc->sc_conflock));
654 
655 	/*
656 	 * If the reconfiguration request isn't the result of LCT change
657 	 * notification, then be more thorough: ask all bus ports to scan
658 	 * their busses.  Wait up to 5 minutes for each bus port to complete
659 	 * the request.
660 	 */
661 	if (chgind == 0) {
662 		if ((rv = iop_lct_get(sc)) != 0) {
663 			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
664 			return (rv);
665 		}
666 
667 		le = sc->sc_lct->entry;
668 		for (i = 0; i < sc->sc_nlctent; i++, le++) {
669 			if ((le16toh(le->classid) & 4095) !=
670 			    I2O_CLASS_BUS_ADAPTER_PORT)
671 				continue;
672 			tid = le16toh(le->localtid) & 4095;
673 
674 			im = iop_msg_alloc(sc, IM_WAIT);
675 
676 			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
677 			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
678 			mf.msgictx = IOP_ICTX;
679 			mf.msgtctx = im->im_tctx;
680 
681 			DPRINTF(("%s: scanning bus %d\n", device_xname(sc->sc_dev),
682 			    tid));
683 
684 			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
685 			iop_msg_free(sc, im);
686 #ifdef I2ODEBUG
687 			if (rv != 0)
688 				aprint_error_dev(sc->sc_dev, "bus scan failed\n");
689 #endif
690 		}
691 	} else if (chgind <= sc->sc_chgind) {
692 		DPRINTF(("%s: LCT unchanged (async)\n", device_xname(sc->sc_dev)));
693 		return (0);
694 	}
695 
696 	/* Re-read the LCT and determine if it has changed. */
697 	if ((rv = iop_lct_get(sc)) != 0) {
698 		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
699 		return (rv);
700 	}
701 	DPRINTF(("%s: %d LCT entries\n", device_xname(sc->sc_dev), sc->sc_nlctent));
702 
703 	chgind = le32toh(sc->sc_lct->changeindicator);
704 	if (chgind == sc->sc_chgind) {
705 		DPRINTF(("%s: LCT unchanged\n", device_xname(sc->sc_dev)));
706 		return (0);
707 	}
708 	DPRINTF(("%s: LCT changed\n", device_xname(sc->sc_dev)));
709 	sc->sc_chgind = chgind;
710 
711 	if (sc->sc_tidmap != NULL)
712 		free(sc->sc_tidmap, M_DEVBUF);
713 	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
714 	    M_DEVBUF, M_WAITOK|M_ZERO);
715 
716 	/* Allow 1 queued command per device while we're configuring. */
717 	iop_adjqparam(sc, 1);
718 
719 	/*
720 	 * Match and attach child devices.  We configure high-level devices
721 	 * first so that any claims will propagate throughout the LCT,
722 	 * hopefully masking off aliased devices as a result.
723 	 *
724 	 * Re-reading the LCT at this point is a little dangerous, but we'll
725 	 * trust the IOP (and the operator) to behave themselves...
726 	 */
727 	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
728 	    IC_CONFIGURE | IC_PRIORITY);
729 	if ((rv = iop_lct_get(sc)) != 0) {
730 		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
731 	}
732 	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
733 	    IC_CONFIGURE);
734 
735 	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
736 		nextii = LIST_NEXT(ii, ii_list);
737 
738 		/* Detach devices that were configured, but are now gone. */
739 		for (i = 0; i < sc->sc_nlctent; i++)
740 			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
741 				break;
742 		if (i == sc->sc_nlctent ||
743 		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0) {
744 			config_detach(ii->ii_dv, DETACH_FORCE);
745 			continue;
746 		}
747 
748 		/*
749 		 * Tell initiators that existed before the re-configuration
750 		 * to re-configure.
751 		 */
752 		if (ii->ii_reconfig == NULL)
753 			continue;
754 		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
755 			aprint_error_dev(sc->sc_dev, "%s failed reconfigure (%d)\n",
756 			    device_xname(ii->ii_dv), rv);
757 	}
758 
759 	/* Re-adjust queue parameters and return. */
760 	if (sc->sc_nii != 0)
761 		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
762 		    / sc->sc_nii);
763 
764 	return (0);
765 }
766 
767 /*
768  * Configure I2O devices into the system.
769  */
770 static void
771 iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
772 {
773 	struct iop_attach_args ia;
774 	struct iop_initiator *ii;
775 	const struct i2o_lct_entry *le;
776 	device_t dv;
777 	int i, j, nent;
778 	u_int usertid;
779 	int locs[IOPCF_NLOCS];
780 
781 	nent = sc->sc_nlctent;
782 	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
783 		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;
784 
785 		/* Ignore the device if it's in use. */
786 		usertid = le32toh(le->usertid) & 4095;
787 		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
788 			continue;
789 
790 		ia.ia_class = le16toh(le->classid) & 4095;
791 		ia.ia_tid = sc->sc_tidmap[i].it_tid;
792 
793 		/* Ignore uninteresting devices. */
794 		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
795 			if (iop_class[j].ic_class == ia.ia_class)
796 				break;
797 		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
798 		    (iop_class[j].ic_flags & mask) != maskval)
799 			continue;
800 
801 		/*
802 		 * Try to configure the device only if it's not already
803 		 * configured.
804  		 */
805  		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
806  			if (ia.ia_tid == ii->ii_tid) {
807 				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
808 				strcpy(sc->sc_tidmap[i].it_dvname,
809 				    device_xname(ii->ii_dv));
810  				break;
811 			}
812 		}
813 		if (ii != NULL)
814 			continue;
815 
816 		locs[IOPCF_TID] = ia.ia_tid;
817 
818 		dv = config_found(sc->sc_dev, &ia, iop_print,
819 		    CFARGS(.submatch = config_stdsubmatch,
820 			   .locators = locs));
821 		if (dv != NULL) {
822  			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
823 			strcpy(sc->sc_tidmap[i].it_dvname, device_xname(dv));
824 		}
825 	}
826 }
827 
828 /*
829  * Adjust queue parameters for all child devices.
830  */
831 static void
832 iop_adjqparam(struct iop_softc *sc, int mpi)
833 {
834 	struct iop_initiator *ii;
835 
836 	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
837 		if (ii->ii_adjqparam != NULL)
838 			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
839 }
840 
841 static void
842 iop_devinfo(int class, char *devinfo, size_t l)
843 {
844 	int i;
845 
846 	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
847 		if (class == iop_class[i].ic_class)
848 			break;
849 
850 	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
851 		snprintf(devinfo, l, "device (class 0x%x)", class);
852 	else
853 		strlcpy(devinfo, iop_class[i].ic_caption, l);
854 }
855 
856 static int
857 iop_print(void *aux, const char *pnp)
858 {
859 	struct iop_attach_args *ia;
860 	char devinfo[256];
861 
862 	ia = aux;
863 
864 	if (pnp != NULL) {
865 		iop_devinfo(ia->ia_class, devinfo, sizeof(devinfo));
866 		aprint_normal("%s at %s", devinfo, pnp);
867 	}
868 	aprint_normal(" tid %d", ia->ia_tid);
869 	return (UNCONF);
870 }
871 
872 /*
873  * Shut down all configured IOPs.
874  */
875 static void
876 iop_shutdown(void *junk)
877 {
878 	struct iop_softc *sc;
879 	int i;
880 
881 	printf("shutting down iop devices...");
882 
883 	for (i = 0; i < iop_cd.cd_ndevs; i++) {
884 		if ((sc = device_lookup_private(&iop_cd, i)) == NULL)
885 			continue;
886 		if ((sc->sc_flags & IOP_ONLINE) == 0)
887 			continue;
888 
889 		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
890 		    0, 5000);
891 
892 		if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
893 			/*
894 			 * Some AMI firmware revisions will go to sleep and
895 			 * never come back after this.
896 			 */
897 			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
898 			    IOP_ICTX, 0, 1000);
899 		}
900 	}
901 
902 	/* Wait.  Some boards could still be flushing, stupidly enough. */
903 	delay(5000*1000);
904 	printf(" done\n");
905 }
906 
907 /*
908  * Retrieve IOP status.
909  */
910 int
911 iop_status_get(struct iop_softc *sc, int nosleep)
912 {
913 	struct i2o_exec_status_get mf;
914 	struct i2o_status *st;
915 	paddr_t pa;
916 	int rv, i;
917 
918 	pa = sc->sc_scr_dmamap->dm_segs[0].ds_addr;
919 	st = (struct i2o_status *)sc->sc_scr;
920 
921 	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
922 	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
923 	mf.reserved[0] = 0;
924 	mf.reserved[1] = 0;
925 	mf.reserved[2] = 0;
926 	mf.reserved[3] = 0;
927 	mf.addrlow = (u_int32_t)pa;
928 	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
929 	mf.length = sizeof(sc->sc_status);
930 
931 	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
932 	    BUS_DMASYNC_PREWRITE);
933 	memset(st, 0, sizeof(*st));
934 	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
935 	    BUS_DMASYNC_POSTWRITE);
936 
937 	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
938 		return (rv);
939 
940 	for (i = 100; i != 0; i--) {
941 		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
942 		    sizeof(*st), BUS_DMASYNC_POSTREAD);
943 		if (st->syncbyte == 0xff)
944 			break;
945 		if (nosleep)
946 			DELAY(100*1000);
947 		else
948 			kpause("iopstat", false, hz / 10, NULL);
949 	}
950 
951 	if (st->syncbyte != 0xff) {
952 		aprint_error_dev(sc->sc_dev, "STATUS_GET timed out\n");
953 		rv = EIO;
954 	} else {
955 		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
956 		rv = 0;
957 	}
958 
959 	return (rv);
960 }
961 
962 /*
963  * Initialize and populate the IOP's outbound FIFO.
964  */
965 static int
966 iop_ofifo_init(struct iop_softc *sc)
967 {
968 	bus_addr_t addr;
969 	bus_dma_segment_t seg;
970 	struct i2o_exec_outbound_init *mf;
971 	int i, rseg, rv;
972 	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;
973 
974 	sw = (u_int32_t *)sc->sc_scr;
975 
976 	mf = (struct i2o_exec_outbound_init *)mb;
977 	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
978 	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
979 	mf->msgictx = IOP_ICTX;
980 	mf->msgtctx = 0;
981 	mf->pagesize = PAGE_SIZE;
982 	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);
983 
984 	/*
985 	 * The I2O spec says that there are two SGLs: one for the status
986 	 * word, and one for a list of discarded MFAs.  It continues to say
987 	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
988 	 * necessary; this isn't the case (and is in fact a bad thing).
989 	 */
990 	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
991 	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
992 	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
993 	    (u_int32_t)sc->sc_scr_dmamap->dm_segs[0].ds_addr;
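	/*
	 * The high 16 bits of the first message word hold the frame size
	 * in 32-bit words; bump it to cover the two SGL words just added.
	 */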
994 	mb[0] += 2 << 16;
995 
996 	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
997 	    BUS_DMASYNC_POSTWRITE);
998 	*sw = 0;
999 	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1000 	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1001 
1002 	if ((rv = iop_post(sc, mb)) != 0)
1003 		return (rv);
1004 
1005 	POLL(5000,
1006 	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1007 	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD),
1008 	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));
1009 
1010 	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
1011 		aprint_error_dev(sc->sc_dev, "outbound FIFO init failed (%d)\n",
1012 		    le32toh(*sw));
1013 		return (EIO);
1014 	}
1015 
1016 	/* Allocate DMA safe memory for the reply frames. */
1017 	if (sc->sc_rep_phys == 0) {
1018 		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;
1019 
1020 		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
1021 		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
1022 		if (rv != 0) {
1023 			aprint_error_dev(sc->sc_dev, "DMA alloc = %d\n",
1024 			   rv);
1025 			return (rv);
1026 		}
1027 
1028 		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
1029 		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
1030 		if (rv != 0) {
1031 			aprint_error_dev(sc->sc_dev, "DMA map = %d\n", rv);
1032 			return (rv);
1033 		}
1034 
1035 		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
1036 		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
1037 		if (rv != 0) {
1038 			aprint_error_dev(sc->sc_dev, "DMA create = %d\n", rv);
1039 			return (rv);
1040 		}
1041 
1042 		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
1043 		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
1044 		if (rv != 0) {
1045 			aprint_error_dev(sc->sc_dev, "DMA load = %d\n", rv);
1046 			return (rv);
1047 		}
1048 
1049 		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;
1050 
1051 		/* Now safe to sync the reply map. */
1052 		sc->sc_curib = 0;
1053 	}
1054 
1055 	/* Populate the outbound FIFO. */
1056 	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
1057 		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
1058 		addr += sc->sc_framesize;
1059 	}
1060 
1061 	return (0);
1062 }
1063 
1064 /*
1065  * Read the specified number of bytes from the IOP's hardware resource table.
1066  */
1067 static int
1068 iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
1069 {
1070 	struct iop_msg *im;
1071 	int rv;
1072 	struct i2o_exec_hrt_get *mf;
1073 	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1074 
1075 	im = iop_msg_alloc(sc, IM_WAIT);
1076 	mf = (struct i2o_exec_hrt_get *)mb;
1077 	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
1078 	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
1079 	mf->msgictx = IOP_ICTX;
1080 	mf->msgtctx = im->im_tctx;
1081 
1082 	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
1083 	rv = iop_msg_post(sc, im, mb, 30000);
1084 	iop_msg_unmap(sc, im);
1085 	iop_msg_free(sc, im);
1086 	return (rv);
1087 }
1088 
1089 /*
1090  * Read the IOP's hardware resource table.
1091  */
1092 static int
1093 iop_hrt_get(struct iop_softc *sc)
1094 {
1095 	struct i2o_hrt hrthdr, *hrt;
1096 	int size, rv;
1097 
1098 	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
1099 	if (rv != 0)
1100 		return (rv);
1101 
1102 	DPRINTF(("%s: %d hrt entries\n", device_xname(sc->sc_dev),
1103 	    le16toh(hrthdr.numentries)));
1104 
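	/* struct i2o_hrt embeds storage for the first entry, hence the -1. */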
1105 	size = sizeof(struct i2o_hrt) +
1106 	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
1107 	hrt = malloc(size, M_DEVBUF, M_WAITOK);
1108 	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
1109 		free(hrt, M_DEVBUF);
1110 		return (rv);
1111 	}
1112 
1113 	if (sc->sc_hrt != NULL)
1114 		free(sc->sc_hrt, M_DEVBUF);
1115 	sc->sc_hrt = hrt;
1116 	return (0);
1117 }
1118 
1119 /*
1120  * Request the specified number of bytes from the IOP's logical
1121  * configuration table.  If a change indicator is specified, this
1122  * becomes a notification request that completes only when the LCT
1123  * changes, so the caller must be prepared to wait indefinitely.
1124  */
1125 static int
1126 iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
1127 	     u_int32_t chgind)
1128 {
1129 	struct iop_msg *im;
1130 	struct i2o_exec_lct_notify *mf;
1131 	int rv;
1132 	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1133 
1134 	im = iop_msg_alloc(sc, IM_WAIT);
1135 	memset(lct, 0, size);
1136 
1137 	mf = (struct i2o_exec_lct_notify *)mb;
1138 	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
1139 	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
1140 	mf->msgictx = IOP_ICTX;
1141 	mf->msgtctx = im->im_tctx;
1142 	mf->classid = I2O_CLASS_ANY;
1143 	mf->changeindicator = chgind;
1144 
1145 #ifdef I2ODEBUG
1146 	printf("iop_lct_get0: reading LCT");
1147 	if (chgind != 0)
1148 		printf(" (async)");
1149 	printf("\n");
1150 #endif
1151 
1152 	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
1153 	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
1154 	iop_msg_unmap(sc, im);
1155 	iop_msg_free(sc, im);
1156 	return (rv);
1157 }
1158 
1159 /*
1160  * Read the IOP's logical configuration table.
1161  */
1162 int
1163 iop_lct_get(struct iop_softc *sc)
1164 {
1165 	int esize, size, rv;
1166 	struct i2o_lct *lct;
1167 
1168 	esize = le32toh(sc->sc_status.expectedlctsize);
1169 	lct = malloc(esize, M_DEVBUF, M_WAITOK);
1170 	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
1171 		free(lct, M_DEVBUF);
1172 		return (rv);
1173 	}
1174 
1175 	size = le16toh(lct->tablesize) << 2;
1176 	if (esize != size) {
1177 		free(lct, M_DEVBUF);
1178 		lct = malloc(size, M_DEVBUF, M_WAITOK);
1179 		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
1180 			free(lct, M_DEVBUF);
1181 			return (rv);
1182 		}
1183 	}
1184 
1185 	/* Swap in the new LCT. */
1186 	if (sc->sc_lct != NULL)
1187 		free(sc->sc_lct, M_DEVBUF);
1188 	sc->sc_lct = lct;
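	/*
	 * tablesize is in 32-bit words and covers the header, which itself
	 * embeds the first entry; adjust for that when counting entries.
	 */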
1189 	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
1190 	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
1191 	    sizeof(struct i2o_lct_entry);
1192 	return (0);
1193 }
1194 
1195 /*
1196  * Post a SYS_ENABLE message to the adapter.
1197  */
1198 int
1199 iop_sys_enable(struct iop_softc *sc)
1200 {
1201 	struct iop_msg *im;
1202 	struct i2o_msg mf;
1203 	int rv;
1204 
1205 	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
1206 
1207 	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
1208 	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_ENABLE);
1209 	mf.msgictx = IOP_ICTX;
1210 	mf.msgtctx = im->im_tctx;
1211 
1212 	rv = iop_msg_post(sc, im, &mf, 30000);
1213 	if (rv == 0) {
1214 		if ((im->im_flags & IM_FAIL) != 0)
1215 			rv = ENXIO;
1216 		else if (im->im_reqstatus == I2O_STATUS_SUCCESS ||
1217 		    (im->im_reqstatus == I2O_STATUS_ERROR_NO_DATA_XFER &&
1218 		    im->im_detstatus == I2O_DSC_INVALID_REQUEST))
1219 			rv = 0;
1220 		else
1221 			rv = EIO;
1222 	}
1223 
1224 	iop_msg_free(sc, im);
1225 	return (rv);
1226 }
1227 
1228 /*
1229  * Request the specified parameter group from the target.  If an initiator
1230  * is specified (a) don't wait for the operation to complete, but instead
1231  * let the initiator's interrupt handler deal with the reply and (b) place a
1232  * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
1233  */
1234 int
1235 iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
1236 		  int size, struct iop_initiator *ii)
1237 {
1238 	struct iop_msg *im;
1239 	struct i2o_util_params_op *mf;
1240 	int rv;
1241 	struct iop_pgop *pgop;
1242 	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1243 
1244 	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
1245 	pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK);
1246 	im->im_dvcontext = pgop;
1247 
1248 	mf = (struct i2o_util_params_op *)mb;
1249 	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1250 	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
1251 	mf->msgictx = IOP_ICTX;
1252 	mf->msgtctx = im->im_tctx;
1253 	mf->flags = 0;
1254 
1255 	pgop->olh.count = htole16(1);
1256 	pgop->olh.reserved = htole16(0);
1257 	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
1258 	pgop->oat.fieldcount = htole16(0xffff);
1259 	pgop->oat.group = htole16(group);
1260 
1261 	memset(buf, 0, size);
1262 	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
1263 	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
1264 	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));
1265 
1266 	/* Detect errors; let partial transfers count as success. */
1267 	if (ii == NULL && rv == 0) {
1268 		if (im->im_reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
1269 		    im->im_detstatus == I2O_DSC_UNKNOWN_ERROR)
1270 			rv = 0;
1271 		else
1272 			rv = (im->im_reqstatus != 0 ? EIO : 0);
1273 
1274 		if (rv != 0)
1275 			printf("%s: FIELD_GET failed for tid %d group %d\n",
1276 			    device_xname(sc->sc_dev), tid, group);
1277 	}
1278 
1279 	if (ii == NULL || rv != 0) {
1280 		iop_msg_unmap(sc, im);
1281 		iop_msg_free(sc, im);
1282 		free(pgop, M_DEVBUF);
1283 	}
1284 
1285 	return (rv);
1286 }
1287 
1288 /*
1289  * Set a single field in a scalar parameter group.
1290  */
1291 int
1292 iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
1293 	      int size, int field)
1294 {
1295 	struct iop_msg *im;
1296 	struct i2o_util_params_op *mf;
1297 	struct iop_pgop *pgop;
1298 	int rv, totsize;
1299 	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1300 
1301 	totsize = sizeof(*pgop) + size;
1302 
1303 	im = iop_msg_alloc(sc, IM_WAIT);
1304 	pgop = malloc(totsize, M_DEVBUF, M_WAITOK);
1305 	mf = (struct i2o_util_params_op *)mb;
1306 	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1307 	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1308 	mf->msgictx = IOP_ICTX;
1309 	mf->msgtctx = im->im_tctx;
1310 	mf->flags = 0;
1311 
1312 	pgop->olh.count = htole16(1);
1313 	pgop->olh.reserved = htole16(0);
1314 	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
1315 	pgop->oat.fieldcount = htole16(1);
1316 	pgop->oat.group = htole16(group);
1317 	pgop->oat.fields[0] = htole16(field);
1318 	memcpy(pgop + 1, buf, size);
1319 
1320 	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
1321 	rv = iop_msg_post(sc, im, mb, 30000);
1322 	if (rv != 0)
1323 		aprint_error_dev(sc->sc_dev, "FIELD_SET failed for tid %d group %d\n",
1324 		    tid, group);
1325 
1326 	iop_msg_unmap(sc, im);
1327 	iop_msg_free(sc, im);
1328 	free(pgop, M_DEVBUF);
1329 	return (rv);
1330 }
1331 
1332 /*
1333  * Delete all rows in a tabular parameter group.
1334  */
1335 int
1336 iop_table_clear(struct iop_softc *sc, int tid, int group)
1337 {
1338 	struct iop_msg *im;
1339 	struct i2o_util_params_op *mf;
1340 	struct iop_pgop pgop;
1341 	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1342 	int rv;
1343 
1344 	im = iop_msg_alloc(sc, IM_WAIT);
1345 
1346 	mf = (struct i2o_util_params_op *)mb;
1347 	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1348 	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1349 	mf->msgictx = IOP_ICTX;
1350 	mf->msgtctx = im->im_tctx;
1351 	mf->flags = 0;
1352 
1353 	pgop.olh.count = htole16(1);
1354 	pgop.olh.reserved = htole16(0);
1355 	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
1356 	pgop.oat.fieldcount = htole16(0);
1357 	pgop.oat.group = htole16(group);
1358 	pgop.oat.fields[0] = htole16(0);
1359 
1360 	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
1361 	rv = iop_msg_post(sc, im, mb, 30000);
1362 	if (rv != 0)
1363 		aprint_error_dev(sc->sc_dev, "TABLE_CLEAR failed for tid %d group %d\n",
1364 		    tid, group);
1365 
1366 	iop_msg_unmap(sc, im);
1367 	iop_msg_free(sc, im);
1368 	return (rv);
1369 }
1370 
1371 /*
1372  * Add a single row to a tabular parameter group.  The row can have only one
1373  * field.
1374  */
1375 int
1376 iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
1377 		  int size, int row)
1378 {
1379 	struct iop_msg *im;
1380 	struct i2o_util_params_op *mf;
1381 	struct iop_pgop *pgop;
1382 	int rv, totsize;
1383 	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1384 
1385 	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;
1386 
1387 	im = iop_msg_alloc(sc, IM_WAIT);
1388 	pgop = malloc(totsize, M_DEVBUF, M_WAITOK);
1389 	mf = (struct i2o_util_params_op *)mb;
1390 	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1391 	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1392 	mf->msgictx = IOP_ICTX;
1393 	mf->msgtctx = im->im_tctx;
1394 	mf->flags = 0;
1395 
1396 	pgop->olh.count = htole16(1);
1397 	pgop->olh.reserved = htole16(0);
1398 	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
1399 	pgop->oat.fieldcount = htole16(1);
1400 	pgop->oat.group = htole16(group);
1401 	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
1402 	pgop->oat.fields[1] = htole16(1);	/* RowCount */
1403 	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
1404 	memcpy(&pgop->oat.fields[3], buf, size);
1405 
1406 	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
1407 	rv = iop_msg_post(sc, im, mb, 30000);
1408 	if (rv != 0)
1409 		aprint_error_dev(sc->sc_dev, "ADD_ROW failed for tid %d group %d row %d\n",
1410 		    tid, group, row);
1411 
1412 	iop_msg_unmap(sc, im);
1413 	iop_msg_free(sc, im);
1414 	free(pgop, M_DEVBUF);
1415 	return (rv);
1416 }
1417 
1418 /*
1419  * Execute a simple command (no parameters).
1420  */
1421 int
1422 iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
1423 	       int async, int timo)
1424 {
1425 	struct iop_msg *im;
1426 	struct i2o_msg mf;
1427 	int rv, fl;
1428 
1429 	fl = (async != 0 ? IM_WAIT : IM_POLL);
1430 	im = iop_msg_alloc(sc, fl);
1431 
1432 	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
1433 	mf.msgfunc = I2O_MSGFUNC(tid, function);
1434 	mf.msgictx = ictx;
1435 	mf.msgtctx = im->im_tctx;
1436 
1437 	rv = iop_msg_post(sc, im, &mf, timo);
1438 	iop_msg_free(sc, im);
1439 	return (rv);
1440 }
1441 
1442 /*
1443  * Post the system table to the IOP.
1444  */
1445 static int
1446 iop_systab_set(struct iop_softc *sc)
1447 {
1448 	struct i2o_exec_sys_tab_set *mf;
1449 	struct iop_msg *im;
1450 	bus_space_handle_t bsh;
1451 	bus_addr_t boo;
1452 	u_int32_t mema[2], ioa[2];
1453 	int rv;
1454 	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1455 
1456 	im = iop_msg_alloc(sc, IM_WAIT);
1457 
1458 	mf = (struct i2o_exec_sys_tab_set *)mb;
1459 	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
1460 	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
1461 	mf->msgictx = IOP_ICTX;
1462 	mf->msgtctx = im->im_tctx;
1463 	mf->iopid = (device_unit(sc->sc_dev) + 2) << 12;
1464 	mf->segnumber = 0;
1465 
1466 	mema[1] = sc->sc_status.desiredprivmemsize;
1467 	ioa[1] = sc->sc_status.desiredpriviosize;
1468 
1469 	if (mema[1] != 0) {
1470 		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
1471 		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
1472 		mema[0] = htole32(boo);
1473 		if (rv != 0) {
1474 			aprint_error_dev(sc->sc_dev, "can't alloc priv mem space, err = %d\n", rv);
1475 			mema[0] = 0;
1476 			mema[1] = 0;
1477 		}
1478 	}
1479 
1480 	if (ioa[1] != 0) {
1481 		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
1482 		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
1483 		ioa[0] = htole32(boo);
1484 		if (rv != 0) {
1485 			aprint_error_dev(sc->sc_dev, "can't alloc priv i/o space, err = %d\n", rv);
1486 			ioa[0] = 0;
1487 			ioa[1] = 0;
1488 		}
1489 	}
1490 
1491 	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
1492 	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
1493 	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
1494 	rv = iop_msg_post(sc, im, mb, 5000);
1495 	iop_msg_unmap(sc, im);
1496 	iop_msg_free(sc, im);
1497 	return (rv);
1498 }
1499 
1500 /*
1501  * Reset the IOP.  Must be called with interrupts disabled.
1502  */
1503 static int
1504 iop_reset(struct iop_softc *sc)
1505 {
1506 	u_int32_t mfa, *sw;
1507 	struct i2o_exec_iop_reset mf;
1508 	int rv;
1509 	paddr_t pa;
1510 
1511 	sw = (u_int32_t *)sc->sc_scr;
1512 	pa = sc->sc_scr_dmamap->dm_segs[0].ds_addr;
1513 
1514 	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
1515 	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
1516 	mf.reserved[0] = 0;
1517 	mf.reserved[1] = 0;
1518 	mf.reserved[2] = 0;
1519 	mf.reserved[3] = 0;
1520 	mf.statuslow = (u_int32_t)pa;
1521 	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);
1522 
1523 	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1524 	    BUS_DMASYNC_POSTWRITE);
1525 	*sw = htole32(0);
1526 	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1527 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1528 
1529 	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
1530 		return (rv);
1531 
1532 	POLL(2500,
1533 	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1534 	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD), *sw != 0));
1535 	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
1536 		aprint_error_dev(sc->sc_dev, "reset rejected, status 0x%x\n",
1537 		    le32toh(*sw));
1538 		return (EIO);
1539 	}
1540 
1541 	/*
1542 	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
1543 	 * the inbound queue to become responsive.
1544 	 */
1545 	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
1546 	if (mfa == IOP_MFA_EMPTY) {
1547 		aprint_error_dev(sc->sc_dev, "reset failed\n");
1548 		return (EIO);
1549 	}
1550 
1551 	iop_release_mfa(sc, mfa);
1552 	return (0);
1553 }
1554 
1555 /*
1556  * Register a new initiator.  Must be called with the configuration lock
1557  * held.
1558  */
1559 void
1560 iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
1561 {
1562 	static int ictxgen;
1563 
1564 	/* 0 is reserved (by us) for system messages. */
1565 	ii->ii_ictx = ++ictxgen;
1566 
1567 	/*
1568 	 * `Utility initiators' don't make it onto the per-IOP initiator list
1569 	 * (which is used only for configuration), but do get one slot on
1570 	 * the inbound queue.
1571 	 */
1572 	if ((ii->ii_flags & II_UTILITY) == 0) {
1573 		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
1574 		sc->sc_nii++;
1575 	} else
1576 		sc->sc_nuii++;
1577 
1578 	cv_init(&ii->ii_cv, "iopevt");
1579 
1580 	mutex_spin_enter(&sc->sc_intrlock);
1581 	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
1582 	mutex_spin_exit(&sc->sc_intrlock);
1583 }
1584 
1585 /*
1586  * Unregister an initiator.  Must be called with the configuration lock
1587  * held.
1588  */
1589 void
1590 iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
1591 {
1592 
1593 	if ((ii->ii_flags & II_UTILITY) == 0) {
1594 		LIST_REMOVE(ii, ii_list);
1595 		sc->sc_nii--;
1596 	} else
1597 		sc->sc_nuii--;
1598 
1599 	mutex_spin_enter(&sc->sc_intrlock);
1600 	LIST_REMOVE(ii, ii_hash);
1601 	mutex_spin_exit(&sc->sc_intrlock);
1602 
1603 	cv_destroy(&ii->ii_cv);
1604 }
1605 
1606 /*
1607  * Handle a reply frame from the IOP.
1608  */
1609 static int
1610 iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
1611 {
1612 	struct iop_msg *im;
1613 	struct i2o_reply *rb;
1614 	struct i2o_fault_notify *fn;
1615 	struct iop_initiator *ii;
1616 	u_int off, ictx, tctx, status, size;
1617 
1618 	KASSERT(mutex_owned(&sc->sc_intrlock));
1619 
1620 	off = (int)(rmfa - sc->sc_rep_phys);
1621 	rb = (struct i2o_reply *)((char *)sc->sc_rep + off);
1622 
1623 	/* Perform reply queue DMA synchronisation. */
1624 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
1625 	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);
1626 
1627 #ifdef I2ODEBUG
1628 	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
1629 		panic("iop_handle_reply: 64-bit reply");
1630 #endif
1631 	/*
1632 	 * Find the initiator.
1633 	 */
1634 	ictx = le32toh(rb->msgictx);
1635 	if (ictx == IOP_ICTX)
1636 		ii = NULL;
1637 	else {
1638 		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
1639 		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
1640 			if (ii->ii_ictx == ictx)
1641 				break;
1642 		if (ii == NULL) {
1643 #ifdef I2ODEBUG
1644 			iop_reply_print(sc, rb);
1645 #endif
1646 			aprint_error_dev(sc->sc_dev, "WARNING: bad ictx returned (%x)\n",
1647 			    ictx);
1648 			return (-1);
1649 		}
1650 	}
1651 
1652 	/*
1653 	 * If we received a transport failure notice, we've got to dig the
1654 	 * transaction context (if any) out of the original message frame,
1655 	 * and then release the original MFA back to the inbound FIFO.
1656 	 */
1657 	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
1658 		status = I2O_STATUS_SUCCESS;
1659 
1660 		fn = (struct i2o_fault_notify *)rb;
1661 		tctx = iop_inl_msg(sc, fn->lowmfa + 12);
1662 		iop_release_mfa(sc, fn->lowmfa);
1663 		iop_tfn_print(sc, fn);
1664 	} else {
1665 		status = rb->reqstatus;
1666 		tctx = le32toh(rb->msgtctx);
1667 	}
1668 
1669 	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
1670 		/*
1671 		 * This initiator tracks state using message wrappers.
1672 		 *
1673 		 * Find the originating message wrapper, and if requested
1674 		 * notify the initiator.
1675 		 */
1676 		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
1677 		if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
1678 		    (im->im_flags & IM_ALLOCED) == 0 ||
1679 		    tctx != im->im_tctx) {
1680 			aprint_error_dev(sc->sc_dev, "WARNING: bad tctx returned (0x%08x, %p)\n", tctx, im);
1681 			if (im != NULL)
1682 				aprint_error_dev(sc->sc_dev, "flags=0x%08x tctx=0x%08x\n",
1683 				    im->im_flags, im->im_tctx);
1684 #ifdef I2ODEBUG
1685 			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
1686 				iop_reply_print(sc, rb);
1687 #endif
1688 			return (-1);
1689 		}
1690 
1691 		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
1692 			im->im_flags |= IM_FAIL;
1693 
1694 #ifdef I2ODEBUG
1695 		if ((im->im_flags & IM_REPLIED) != 0)
1696 			panic("%s: dup reply", device_xname(sc->sc_dev));
1697 #endif
1698 		im->im_flags |= IM_REPLIED;
1699 
1700 #ifdef I2ODEBUG
1701 		if (status != I2O_STATUS_SUCCESS)
1702 			iop_reply_print(sc, rb);
1703 #endif
1704 		im->im_reqstatus = status;
1705 		im->im_detstatus = le16toh(rb->detail);
1706 
1707 		/* Copy the reply frame, if requested. */
1708 		if (im->im_rb != NULL) {
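			/*
			 * The reply size lives in the high 16 bits of
			 * msgflags, in 32-bit words; >> 14 converts it to
			 * bytes, and the ~3 mask drops the flag bits that
			 * shift into the low two positions.
			 */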
1709 			size = (le32toh(rb->msgflags) >> 14) & ~3;
1710 #ifdef I2ODEBUG
1711 			if (size > sc->sc_framesize)
1712 				panic("iop_handle_reply: reply too large");
1713 #endif
1714 			memcpy(im->im_rb, rb, size);
1715 		}
1716 
1717 		/* Notify the initiator. */
1718 		if ((im->im_flags & IM_WAIT) != 0)
1719 			cv_broadcast(&im->im_cv);
1720 		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL) {
1721 			if (ii != NULL) {
1722 				mutex_spin_exit(&sc->sc_intrlock);
1723 				(*ii->ii_intr)(ii->ii_dv, im, rb);
1724 				mutex_spin_enter(&sc->sc_intrlock);
1725 			}
1726 		}
1727 	} else {
1728 		/*
1729 		 * This initiator discards message wrappers.
1730 		 *
1731 		 * Simply pass the reply frame to the initiator.
1732 		 */
1733 		if (ii != NULL) {
1734 			mutex_spin_exit(&sc->sc_intrlock);
1735 			(*ii->ii_intr)(ii->ii_dv, NULL, rb);
1736 			mutex_spin_enter(&sc->sc_intrlock);
1737 		}
1738 	}
1739 
1740 	return (status);
1741 }
1742 
1743 /*
1744  * Handle an interrupt from the IOP.
1745  */
1746 int
1747 iop_intr(void *arg)
1748 {
1749 	struct iop_softc *sc;
1750 	u_int32_t rmfa;
1751 
1752 	sc = arg;
1753 
1754 	mutex_spin_enter(&sc->sc_intrlock);
1755 
1756 	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0) {
1757 		mutex_spin_exit(&sc->sc_intrlock);
1758 		return (0);
1759 	}
1760 
1761 	for (;;) {
1762 		/* Double read to account for IOP bug. */
1763 		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
1764 			rmfa = iop_inl(sc, IOP_REG_OFIFO);
1765 			if (rmfa == IOP_MFA_EMPTY)
1766 				break;
1767 		}
1768 		iop_handle_reply(sc, rmfa);
1769 		iop_outl(sc, IOP_REG_OFIFO, rmfa);
1770 	}
1771 
1772 	mutex_spin_exit(&sc->sc_intrlock);
1773 	return (1);
1774 }
1775 
1776 /*
1777  * Handle an event signalled by the executive.
1778  */
1779 static void
1780 iop_intr_event(device_t dv, struct iop_msg *im, void *reply)
1781 {
1782 	struct i2o_util_event_register_reply *rb;
1783 	u_int event;
1784 
1785 	rb = reply;
1786 
1787 	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
1788 		return;
1789 
1790 	event = le32toh(rb->event);
1791 	printf("%s: event 0x%08x received\n", device_xname(dv), event);
1792 }
1793 
1794 /*
1795  * Allocate a message wrapper.
1796  */
1797 struct iop_msg *
1798 iop_msg_alloc(struct iop_softc *sc, int flags)
1799 {
1800 	struct iop_msg *im;
1801 	static u_int tctxgen;
1802 	int i;
1803 
1804 #ifdef I2ODEBUG
1805 	if ((flags & IM_SYSMASK) != 0)
1806 		panic("iop_msg_alloc: system flags specified");
1807 #endif
1808 
1809 	mutex_spin_enter(&sc->sc_intrlock);
1810 	im = SLIST_FIRST(&sc->sc_im_freelist);
1811 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
1812 	if (im == NULL)
1813 		panic("iop_msg_alloc: no free wrappers");
1814 #endif
1815 	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
1816 	mutex_spin_exit(&sc->sc_intrlock);
1817 
1818 	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
1819 	tctxgen += (1 << IOP_TCTX_SHIFT);
1820 	im->im_flags = flags | IM_ALLOCED;
1821 	im->im_rb = NULL;
1822 	i = 0;
1823 	do {
1824 		im->im_xfer[i++].ix_size = 0;
1825 	} while (i < IOP_MAX_MSG_XFERS);
1826 
1827 	return (im);
1828 }
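/*
 * Editor's note: the transaction context handed to the IOP (im_tctx)
 * packs the wrapper's slot index into the low IOP_TCTX_SHIFT (12) bits
 * and a per-allocation generation count into the remaining bits.  For
 * example, the wrapper in slot 5, allocated when tctxgen holds
 * 3 << IOP_TCTX_SHIFT, gets
 *
 *	im_tctx = (3 << 12) | 5 = 0x00003005
 *
 * so a stale reply carrying an old generation for a recycled slot can be
 * rejected when the reply is matched back to its wrapper.
 */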
1829 
1830 /*
1831  * Free a message wrapper.
1832  */
1833 void
1834 iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
1835 {
1836 
1837 #ifdef I2ODEBUG
1838 	if ((im->im_flags & IM_ALLOCED) == 0)
1839 		panic("iop_msg_free: wrapper not allocated");
1840 #endif
1841 
1842 	im->im_flags = 0;
1843 	mutex_spin_enter(&sc->sc_intrlock);
1844 	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
1845 	mutex_spin_exit(&sc->sc_intrlock);
1846 }
1847 
1848 /*
1849  * Map a data transfer.  Write a scatter-gather list into the message frame.
1850  */
1851 int
1852 iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
1853 	    void *xferaddr, int xfersize, int out, struct proc *up)
1854 {
1855 	bus_dmamap_t dm;
1856 	bus_dma_segment_t *ds;
1857 	struct iop_xfer *ix;
1858 	u_int rv, i, nsegs, flg, off, xn;
1859 	u_int32_t *p;
1860 
1861 	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
1862 		if (ix->ix_size == 0)
1863 			break;
1864 
1865 #ifdef I2ODEBUG
1866 	if (xfersize == 0)
1867 		panic("iop_msg_map: null transfer");
1868 	if (xfersize > IOP_MAX_XFER)
1869 		panic("iop_msg_map: transfer too large");
1870 	if (xn == IOP_MAX_MSG_XFERS)
1871 		panic("iop_msg_map: too many xfers");
1872 #endif
1873 
1874 	/*
1875 	 * Only the first DMA map is static.
1876 	 */
1877 	if (xn != 0) {
1878 		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
1879 		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
1880 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
1881 		if (rv != 0)
1882 			return (rv);
1883 	}
1884 
1885 	dm = ix->ix_map;
1886 	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
1887 	    (up == NULL ? BUS_DMA_NOWAIT : 0));
1888 	if (rv != 0)
1889 		goto bad;
1890 
1891 	/*
1892 	 * How many SIMPLE SG elements can we fit in this message?
1893 	 */
1894 	off = mb[0] >> 16;
1895 	p = mb + off;
1896 	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
1897 
1898 	if (dm->dm_nsegs > nsegs) {
1899 		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
1900 		rv = EFBIG;
1901 		DPRINTF(("iop_msg_map: too many segs\n"));
1902 		goto bad;
1903 	}
1904 
1905 	nsegs = dm->dm_nsegs;
1906 	xfersize = 0;
1907 
1908 	/*
1909 	 * Write out the SG list.
1910 	 */
1911 	if (out)
1912 		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
1913 	else
1914 		flg = I2O_SGL_SIMPLE;
1915 
1916 	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
1917 		p[0] = (u_int32_t)ds->ds_len | flg;
1918 		p[1] = (u_int32_t)ds->ds_addr;
1919 		xfersize += ds->ds_len;
1920 	}
1921 
1922 	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
1923 	p[1] = (u_int32_t)ds->ds_addr;
1924 	xfersize += ds->ds_len;
1925 
1926 	/* Fix up the transfer record, and sync the map. */
1927 	ix->ix_flags = (out ? IX_OUT : IX_IN);
1928 	ix->ix_size = xfersize;
1929 	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
1930 	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
1931 
1932 	/*
1933 	 * If this is the first xfer we've mapped for this message, adjust
1934 	 * the SGL offset field in the message header.
1935 	 */
1936 	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
1937 		mb[0] += (mb[0] >> 12) & 0xf0;
1938 		im->im_flags |= IM_SGLOFFADJ;
1939 	}
1940 	mb[0] += (nsegs << 17);
1941 	return (0);
1942 
1943  bad:
1944  	if (xn != 0)
1945 		bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
1946 	return (rv);
1947 }
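/*
 * Editor's note: a worked example of the SIMPLE SGL arithmetic above,
 * assuming a 10-word base frame and a 2-segment DMA map.  Initially the
 * upper 16 bits of mb[0] hold the size (10) and the SGL-offset nibble is
 * clear.  The first mapping adds (mb[0] >> 12) & 0xf0, i.e. 10 << 4,
 * setting the SGL offset to 10; each mapping then adds nsegs << 17,
 * i.e. two words per SIMPLE element, making the size 14:
 *
 *	word 10: seg0 length | I2O_SGL_SIMPLE [| I2O_SGL_DATA_OUT]
 *	word 11: seg0 physical address
 *	word 12: seg1 length | flags | I2O_SGL_END_BUFFER
 *	word 13: seg1 physical address
 */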
1948 
1949 /*
1950  * Map a block I/O data transfer (different in that there's only one per
1951  * message maximum, and PAGE addressing may be used).  Write a scatter
1952  * gather list into the message frame.
1953  */
1954 int
1955 iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
1956 		void *xferaddr, int xfersize, int out)
1957 {
1958 	bus_dma_segment_t *ds;
1959 	bus_dmamap_t dm;
1960 	struct iop_xfer *ix;
1961 	u_int rv, i, nsegs, off, slen, tlen, flg;
1962 	paddr_t saddr, eaddr;
1963 	u_int32_t *p;
1964 
1965 #ifdef I2ODEBUG
1966 	if (xfersize == 0)
1967 		panic("iop_msg_map_bio: null transfer");
1968 	if (xfersize > IOP_MAX_XFER)
1969 		panic("iop_msg_map_bio: transfer too large");
1970 	if ((im->im_flags & IM_SGLOFFADJ) != 0)
1971 		panic("iop_msg_map_bio: SGLOFFADJ");
1972 #endif
1973 
1974 	ix = im->im_xfer;
1975 	dm = ix->ix_map;
1976 	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
1977 	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
1978 	if (rv != 0)
1979 		return (rv);
1980 
1981 	off = mb[0] >> 16;
1982 	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
1983 
1984 	/*
1985 	 * If the transfer is highly fragmented and won't fit using SIMPLE
1986 	 * elements, use PAGE_LIST elements instead.  SIMPLE elements are
1987 	 * potentially more efficient, both for us and the IOP.
1988 	 */
1989 	if (dm->dm_nsegs > nsegs) {
1990 		nsegs = 1;
1991 		p = mb + off + 1;
1992 
1993 		/* XXX This should be done with a bus_space flag. */
1994 		for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
1995 			slen = ds->ds_len;
1996 			saddr = ds->ds_addr;
1997 
1998 			while (slen > 0) {
1999 				eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
2000 				tlen = uimin(eaddr - saddr, slen);
2001 				slen -= tlen;
2002 				*p++ = le32toh(saddr);
2003 				saddr = eaddr;
2004 				nsegs++;
2005 			}
2006 		}
2007 
2008 		mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
2009 		    I2O_SGL_END;
2010 		if (out)
2011 			mb[off] |= I2O_SGL_DATA_OUT;
2012 	} else {
2013 		p = mb + off;
2014 		nsegs = dm->dm_nsegs;
2015 
2016 		if (out)
2017 			flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
2018 		else
2019 			flg = I2O_SGL_SIMPLE;
2020 
2021 		for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
2022 			p[0] = (u_int32_t)ds->ds_len | flg;
2023 			p[1] = (u_int32_t)ds->ds_addr;
2024 		}
2025 
2026 		p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
2027 		    I2O_SGL_END;
2028 		p[1] = (u_int32_t)ds->ds_addr;
2029 		nsegs <<= 1;
2030 	}
2031 
2032 	/* Fix up the transfer record, and sync the map. */
2033 	ix->ix_flags = (out ? IX_OUT : IX_IN);
2034 	ix->ix_size = xfersize;
2035 	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
2036 	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
2037 
2038 	/*
2039 	 * Adjust the SGL offset and total message size fields.  We don't
2040 	 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
2041 	 */
2042 	mb[0] += ((off << 4) + (nsegs << 16));
2043 	return (0);
2044 }
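/*
 * Editor's note: a worked example of the PAGE_LIST splitting above.
 * With PAGE_SIZE 0x1000, a single DMA segment at 0x1f00 of length 0x300
 * straddles a page boundary and is emitted as two address words:
 *
 *	saddr 0x1f00: eaddr 0x2000, tlen 0x100 -> emit 0x1f00
 *	saddr 0x2000: eaddr 0x3000, tlen 0x200 -> emit 0x2000
 *
 * The element header at mb[off] carries the total transfer size, and
 * nsegs counts the header word plus one word per page address.
 */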
2045 
2046 /*
2047  * Unmap all data transfers associated with a message wrapper.
2048  */
2049 void
2050 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2051 {
2052 	struct iop_xfer *ix;
2053 	int i;
2054 
2055 #ifdef I2ODEBUG
2056 	if (im->im_xfer[0].ix_size == 0)
2057 		panic("iop_msg_unmap: no transfers mapped");
2058 #endif
2059 
2060 	for (ix = im->im_xfer, i = 0;;) {
2061 		bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2062 		    ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2063 		    BUS_DMASYNC_POSTREAD);
2064 		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2065 
2066 		/* Only the first DMA map is static. */
2067 		if (i != 0)
2068 			bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
2069 		if (++i >= IOP_MAX_MSG_XFERS)
2070 			break;
2071 		if ((++ix)->ix_size == 0)
2072 			break;
2073 	}
2074 }
2075 
2076 /*
2077  * Post a message frame to the IOP's inbound queue.
2078  */
2079 int
2080 iop_post(struct iop_softc *sc, u_int32_t *mb)
2081 {
2082 	u_int32_t mfa;
2083 
2084 #ifdef I2ODEBUG
2085 	if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
2086 		panic("iop_post: frame too large");
2087 #endif
2088 
2089 	mutex_spin_enter(&sc->sc_intrlock);
2090 
2091 	/* Allocate a slot with the IOP. */
2092 	if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
2093 		if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2094 			mutex_spin_exit(&sc->sc_intrlock);
2095 			aprint_error_dev(sc->sc_dev, "mfa not forthcoming\n");
2096 			return (EAGAIN);
2097 		}
2098 
2099 	/* Perform reply buffer DMA synchronisation. */
2100 	if (sc->sc_rep_size != 0) {
2101 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2102 		    sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2103 	}
2104 
2105 	/* Copy out the message frame. */
2106 	bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
2107 	    mb[0] >> 16);
2108 	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
2109 	    (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);
2110 
2111 	/* Post the MFA back to the IOP. */
2112 	iop_outl(sc, IOP_REG_IFIFO, mfa);
2113 
2114 	mutex_spin_exit(&sc->sc_intrlock);
2115 	return (0);
2116 }
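/*
 * Editor's note: iop_post() trusts word 0 of the frame for its length:
 * the upper 16 bits give the size in 32-bit words, which is why
 * (mb[0] >> 16) words are copied out and (mb[0] >> 14) & ~3 is the same
 * quantity expressed in bytes for the write barrier.  E.g. the 4-word
 * no-op frame built in iop_release_mfa() below has
 * mb[0] = I2O_VERSION_11 | (4 << 16), so 4 words (16 bytes) are copied.
 */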
2117 
2118 /*
2119  * Post a message to the IOP and deal with completion.
2120  */
2121 int
2122 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2123 {
2124 	u_int32_t *mb;
2125 	int rv;
2126 
2127 	mb = xmb;
2128 
2129 	/* Terminate the scatter/gather list chain. */
2130 	if ((im->im_flags & IM_SGLOFFADJ) != 0)
2131 		mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2132 
2133 	if ((rv = iop_post(sc, mb)) != 0)
2134 		return (rv);
2135 
2136 	if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2137 		if ((im->im_flags & IM_POLL) != 0)
2138 			iop_msg_poll(sc, im, timo);
2139 		else
2140 			iop_msg_wait(sc, im, timo);
2141 
2142 		mutex_spin_enter(&sc->sc_intrlock);
2143 		if ((im->im_flags & IM_REPLIED) != 0) {
2144 			if ((im->im_flags & IM_NOSTATUS) != 0)
2145 				rv = 0;
2146 			else if ((im->im_flags & IM_FAIL) != 0)
2147 				rv = ENXIO;
2148 			else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2149 				rv = EIO;
2150 			else
2151 				rv = 0;
2152 		} else
2153 			rv = EBUSY;
2154 		mutex_spin_exit(&sc->sc_intrlock);
2155 	} else
2156 		rv = 0;
2157 
2158 	return (rv);
2159 }
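/*
 * Editor's note: the usual calling sequence around iop_msg_post() is
 * alloc -> build frame -> (map) -> post -> free.  A minimal sketch,
 * modelled on iop_util_claim() below (`tid' is an assumed target TID
 * and the claim flag value is an assumption):
 */
#if 0	/* illustrative only */
	struct iop_msg *im;
	struct i2o_util_claim mf;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT);
	mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
	mf.msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_CLAIM);
	mf.msgictx = IOP_ICTX;
	mf.msgtctx = im->im_tctx;
	mf.flags = I2O_UTIL_CLAIM_PRIMARY_USER;	/* assumed flag value */
	rv = iop_msg_post(sc, im, &mf, 5000);	/* sleep up to 5s */
	iop_msg_free(sc, im);
#endif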
2160 
2161 /*
2162  * Spin until the specified message is replied to.
2163  */
2164 static void
2165 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2166 {
2167 	u_int32_t rmfa;
2168 
2169 	mutex_spin_enter(&sc->sc_intrlock);
2170 
2171 	for (timo *= 10; timo != 0; timo--) {
2172 		if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2173 			/* Double read to account for IOP bug. */
2174 			rmfa = iop_inl(sc, IOP_REG_OFIFO);
2175 			if (rmfa == IOP_MFA_EMPTY)
2176 				rmfa = iop_inl(sc, IOP_REG_OFIFO);
2177 			if (rmfa != IOP_MFA_EMPTY) {
2178 				iop_handle_reply(sc, rmfa);
2179 
2180 				/*
2181 				 * Return the reply frame to the IOP's
2182 				 * outbound FIFO.
2183 				 */
2184 				iop_outl(sc, IOP_REG_OFIFO, rmfa);
2185 			}
2186 		}
2187 		if ((im->im_flags & IM_REPLIED) != 0)
2188 			break;
2189 		mutex_spin_exit(&sc->sc_intrlock);
2190 		DELAY(100);
2191 		mutex_spin_enter(&sc->sc_intrlock);
2192 	}
2193 
2194 	if (timo == 0) {
2195 #ifdef I2ODEBUG
2196 		printf("%s: poll - no reply\n", device_xname(sc->sc_dev));
2197 		if (iop_status_get(sc, 1) != 0)
2198 			printf("iop_msg_poll: unable to retrieve status\n");
2199 		else
2200 			printf("iop_msg_poll: IOP state = %d\n",
2201 			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2202 #endif
2203 	}
2204 
2205 	mutex_spin_exit(&sc->sc_intrlock);
2206 }
2207 
2208 /*
2209  * Sleep until the specified message is replied to.
2210  */
2211 static void
2212 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2213 {
2214 	int rv;
2215 
2216 	mutex_spin_enter(&sc->sc_intrlock);
2217 	if ((im->im_flags & IM_REPLIED) != 0) {
2218 		mutex_spin_exit(&sc->sc_intrlock);
2219 		return;
2220 	}
2221 	rv = cv_timedwait(&im->im_cv, &sc->sc_intrlock, mstohz(timo));
2222 	mutex_spin_exit(&sc->sc_intrlock);
2223 
2224 #ifdef I2ODEBUG
2225 	if (rv != 0) {
2226 		printf("iop_msg_wait: cv_timedwait() == %d\n", rv);
2227 		if (iop_status_get(sc, 0) != 0)
2228 			printf("%s: unable to retrieve status\n", __func__);
2229 		else
2230 			printf("%s: IOP state = %d\n", __func__,
2231 			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2232 	}
2233 #else
2234 	__USE(rv);
2235 #endif
2236 }
2237 
2238 /*
2239  * Release an unused message frame back to the IOP's inbound fifo.
2240  */
2241 static void
2242 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2243 {
2244 
2245 	/* Use the frame to issue a no-op. */
2246 	iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
2247 	iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2248 	iop_outl_msg(sc, mfa + 8, 0);
2249 	iop_outl_msg(sc, mfa + 12, 0);
2250 
2251 	iop_outl(sc, IOP_REG_IFIFO, mfa);
2252 }
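/*
 * Editor's note: the no-op frame written above decodes as:
 *
 *	word 0: I2O_VERSION_11 | (4 << 16)		version, 4-word size
 *	word 1: I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP)	function/TIDs
 *	word 2: 0					initiator context
 *	word 3: 0					transaction context
 *
 * i.e. the cheapest valid message, used purely to hand the MFA back.
 */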
2253 
2254 #ifdef I2ODEBUG
2255 /*
2256  * Dump a reply frame header.
2257  */
2258 static void
2259 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2260 {
2261 	u_int function, detail;
2262 	const char *statusstr;
2263 
2264 	function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2265 	detail = le16toh(rb->detail);
2266 
2267 	printf("%s: reply:\n", device_xname(sc->sc_dev));
2268 
2269 	if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2270 		statusstr = iop_status[rb->reqstatus];
2271 	else
2272 		statusstr = "undefined error code";
2273 
2274 	printf("%s:   function=0x%02x status=0x%02x (%s)\n",
2275 	    device_xname(sc->sc_dev), function, rb->reqstatus, statusstr);
2276 	printf("%s:   detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2277 	    device_xname(sc->sc_dev), detail, le32toh(rb->msgictx),
2278 	    le32toh(rb->msgtctx));
2279 	printf("%s:   tidi=%d tidt=%d flags=0x%02x\n", device_xname(sc->sc_dev),
2280 	    (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2281 	    (le32toh(rb->msgflags) >> 8) & 0xff);
2282 }
2283 #endif
2284 
2285 /*
2286  * Dump a transport failure reply.
2287  */
2288 static void
2289 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2290 {
2291 
2292 	printf("%s: WARNING: transport failure:\n", device_xname(sc->sc_dev));
2293 
2294 	printf("%s:  ictx=0x%08x tctx=0x%08x\n", device_xname(sc->sc_dev),
2295 	    le32toh(fn->msgictx), le32toh(fn->msgtctx));
2296 	printf("%s:  failurecode=0x%02x severity=0x%02x\n",
2297 	    device_xname(sc->sc_dev), fn->failurecode, fn->severity);
2298 	printf("%s:  highestver=0x%02x lowestver=0x%02x\n",
2299 	    device_xname(sc->sc_dev), fn->highestver, fn->lowestver);
2300 }
2301 
2302 /*
2303  * Translate an I2O ASCII field into a C string.
2304  */
2305 void
2306 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2307 {
2308 	int hc, lc, i, nit;
2309 
2310 	dlen--;
2311 	lc = 0;
2312 	hc = 0;
2313 	i = 0;
2314 
2315 	/*
2316 	 * DPT uses NUL as a space, whereas AMI uses it as a terminator.  The
2317 	 * spec has nothing to say about it.  AMI fields are usually filled
2318 	 * with junk after the terminator, so treat NUL as one unless DPT.
2319 	 */
2320 	nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2321 
2322 	while (slen-- != 0 && dlen-- != 0) {
2323 		if (nit && *src == '\0')
2324 			break;
2325 		else if (*src <= 0x20 || *src >= 0x7f) {
2326 			if (hc)
2327 				dst[i++] = ' ';
2328 		} else {
2329 			hc = 1;
2330 			dst[i++] = *src;
2331 			lc = i;
2332 		}
2333 		src++;
2334 	}
2335 
2336 	dst[lc] = '\0';
2337 }
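/*
 * Editor's note: for example, the 8-byte field { 'A','c','m','e',0,0,0,0 }
 * yields "Acme" either way: on a non-DPT IOP the first NUL terminates the
 * scan, while on a DPT IOP the NULs become spaces and `lc' trims the
 * trailing blanks when the string is terminated.
 */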
2338 
2339 /*
2340  * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2341  */
2342 int
2343 iop_print_ident(struct iop_softc *sc, int tid)
2344 {
2345 	struct {
2346 		struct	i2o_param_op_results pr;
2347 		struct	i2o_param_read_results prr;
2348 		struct	i2o_param_device_identity di;
2349 	} __packed p;
2350 	char buf[32];
2351 	int rv;
2352 
2353 	rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2354 	    sizeof(p), NULL);
2355 	if (rv != 0)
2356 		return (rv);
2357 
2358 	iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2359 	    sizeof(buf));
2360 	printf(" <%s, ", buf);
2361 	iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2362 	    sizeof(buf));
2363 	printf("%s, ", buf);
2364 	iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2365 	printf("%s>", buf);
2366 
2367 	return (0);
2368 }
2369 
2370 /*
2371  * Claim or unclaim the specified TID.
2372  */
2373 int
2374 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2375 	       int flags)
2376 {
2377 	struct iop_msg *im;
2378 	struct i2o_util_claim mf;
2379 	int rv, func;
2380 
2381 	func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2382 	im = iop_msg_alloc(sc, IM_WAIT);
2383 
2384 	/* We can use the same structure, as they're identical. */
2385 	mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2386 	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2387 	mf.msgictx = ii->ii_ictx;
2388 	mf.msgtctx = im->im_tctx;
2389 	mf.flags = flags;
2390 
2391 	rv = iop_msg_post(sc, im, &mf, 5000);
2392 	iop_msg_free(sc, im);
2393 	return (rv);
2394 }
2395 
2396 /*
2397  * Perform an abort.
2398  */
2399 int iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
2400 		   int tctxabort, int flags)
2401 {
2402 	struct iop_msg *im;
2403 	struct i2o_util_abort mf;
2404 	int rv;
2405 
2406 	im = iop_msg_alloc(sc, IM_WAIT);
2407 
2408 	mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2409 	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2410 	mf.msgictx = ii->ii_ictx;
2411 	mf.msgtctx = im->im_tctx;
2412 	mf.flags = (func << 24) | flags;
2413 	mf.tctxabort = tctxabort;
2414 
2415 	rv = iop_msg_post(sc, im, &mf, 5000);
2416 	iop_msg_free(sc, im);
2417 	return (rv);
2418 }
2419 
2420 /*
2421  * Enable or disable reception of events for the specified device.
2422  */
2423 int iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2424 {
2425 	struct i2o_util_event_register mf;
2426 
2427 	mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2428 	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2429 	mf.msgictx = ii->ii_ictx;
2430 	mf.msgtctx = 0;
2431 	mf.eventmask = mask;
2432 
2433 	/* This message is replied to only when events are signalled. */
2434 	return (iop_post(sc, (u_int32_t *)&mf));
2435 }
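/*
 * Editor's note: a sketch of how an initiator registers for events and
 * receives them through iop_intr_event() above, modelled on the
 * executive's own setup elsewhere in this driver (the event mask value
 * is an assumed example, not necessarily what the driver uses):
 */
#if 0	/* illustrative only */
	sc->sc_eventii.ii_dv = sc->sc_dev;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);
	iop_util_eventreg(sc, &sc->sc_eventii, I2O_EVENT_GEN_DEVICE_RESET);
#endif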
2436 
2437 int
2438 iopopen(dev_t dev, int flag, int mode, struct lwp *l)
2439 {
2440 	struct iop_softc *sc;
2441 
2442 	if ((sc = device_lookup_private(&iop_cd, minor(dev))) == NULL)
2443 		return (ENXIO);
2444 	if ((sc->sc_flags & IOP_ONLINE) == 0)
2445 		return (ENXIO);
2446 	if ((sc->sc_flags & IOP_OPEN) != 0)
2447 		return (EBUSY);
2448 	sc->sc_flags |= IOP_OPEN;
2449 
2450 	return (0);
2451 }
2452 
2453 int
2454 iopclose(dev_t dev, int flag, int mode,
2455     struct lwp *l)
2456 {
2457 	struct iop_softc *sc;
2458 
2459 	sc = device_lookup_private(&iop_cd, minor(dev));
2460 	sc->sc_flags &= ~IOP_OPEN;
2461 
2462 	return (0);
2463 }
2464 
2465 int
2466 iopioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
2467 {
2468 	struct iop_softc *sc;
2469 	struct iovec *iov;
2470 	int rv, i;
2471 
2472 	sc = device_lookup_private(&iop_cd, minor(dev));
2473 	rv = 0;
2474 
2475 	switch (cmd) {
2476 	case IOPIOCPT:
2477 		rv = kauth_authorize_device_passthru(l->l_cred, dev,
2478 		    KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
2479 		if (rv)
2480 			return (rv);
2481 
2482 		return (iop_passthrough(sc, (struct ioppt *)data, l->l_proc));
2483 
2484 	case IOPIOCGSTATUS:
2485 		iov = (struct iovec *)data;
2486 		i = sizeof(struct i2o_status);
2487 		if (i > iov->iov_len)
2488 			i = iov->iov_len;
2489 		else
2490 			iov->iov_len = i;
2491 		if ((rv = iop_status_get(sc, 0)) == 0)
2492 			rv = copyout(&sc->sc_status, iov->iov_base, i);
2493 		return (rv);
2494 
2495 	case IOPIOCGLCT:
2496 	case IOPIOCGTIDMAP:
2497 	case IOPIOCRECONFIG:
2498 		break;
2499 
2500 	default:
2501 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2502 		printf("%s: unknown ioctl %lx\n", device_xname(sc->sc_dev), cmd);
2503 #endif
2504 		return (ENOTTY);
2505 	}
2506 
2507 	mutex_enter(&sc->sc_conflock);
2508 
2509 	switch (cmd) {
2510 	case IOPIOCGLCT:
2511 		iov = (struct iovec *)data;
2512 		i = le16toh(sc->sc_lct->tablesize) << 2;
2513 		if (i > iov->iov_len)
2514 			i = iov->iov_len;
2515 		else
2516 			iov->iov_len = i;
2517 		rv = copyout(sc->sc_lct, iov->iov_base, i);
2518 		break;
2519 
2520 	case IOPIOCRECONFIG:
2521 		rv = iop_reconfigure(sc, 0);
2522 		break;
2523 
2524 	case IOPIOCGTIDMAP:
2525 		iov = (struct iovec *)data;
2526 		i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2527 		if (i > iov->iov_len)
2528 			i = iov->iov_len;
2529 		else
2530 			iov->iov_len = i;
2531 		rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2532 		break;
2533 	}
2534 
2535 	mutex_exit(&sc->sc_conflock);
2536 	return (rv);
2537 }
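/*
 * Editor's note: the iovec-based ioctls above are driven from user space
 * roughly as follows (a sketch in the spirit of iopctl(8), assuming the
 * device node is /dev/iop0):
 */
#if 0	/* illustrative user-space code, not kernel source */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/uio.h>
#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <err.h>
#include <fcntl.h>

	struct i2o_status st;
	struct iovec iov;
	int fd;

	if ((fd = open("/dev/iop0", O_RDWR)) < 0)
		err(1, "open");
	iov.iov_base = &st;
	iov.iov_len = sizeof(st);
	if (ioctl(fd, IOPIOCGSTATUS, &iov) < 0)
		err(1, "IOPIOCGSTATUS");
	/* iov.iov_len now holds the number of bytes copied out. */
#endif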
2538 
2539 static int
2540 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2541 {
2542 	struct iop_msg *im;
2543 	struct i2o_msg *mf;
2544 	struct ioppt_buf *ptb;
2545 	int rv, i, mapped;
2546 
2547 	mf = NULL;
2548 	im = NULL;
2549 	mapped = 0;
2550 
2551 	if (pt->pt_msglen > sc->sc_framesize ||
2552 	    pt->pt_msglen < sizeof(struct i2o_msg) ||
2553 	    pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2554 	    pt->pt_nbufs < 0 ||
2555 #if 0
2556 	    pt->pt_replylen < 0 ||
2557 #endif
2558             pt->pt_timo < 1000 || pt->pt_timo > 5*60*1000)
2559 		return (EINVAL);
2560 
2561 	for (i = 0; i < pt->pt_nbufs; i++)
2562 		if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2563 			rv = ENOMEM;
2564 			goto bad;
2565 		}
2566 
2567 	mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2568 	if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2569 		goto bad;
2570 
2571 	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2572 	im->im_rb = (struct i2o_reply *)mf;
2573 	mf->msgictx = IOP_ICTX;
2574 	mf->msgtctx = im->im_tctx;
2575 
2576 	for (i = 0; i < pt->pt_nbufs; i++) {
2577 		ptb = &pt->pt_bufs[i];
2578 		rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2579 		    ptb->ptb_datalen, ptb->ptb_out != 0, p);
2580 		if (rv != 0)
2581 			goto bad;
2582 		mapped = 1;
2583 	}
2584 
2585 	if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2586 		goto bad;
2587 
2588 	i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2589 	if (i > sc->sc_framesize)
2590 		i = sc->sc_framesize;
2591 	if (i > pt->pt_replylen)
2592 		i = pt->pt_replylen;
2593 	rv = copyout(im->im_rb, pt->pt_reply, i);
2594 
2595  bad:
2596 	if (mapped != 0)
2597 		iop_msg_unmap(sc, im);
2598 	if (im != NULL)
2599 		iop_msg_free(sc, im);
2600 	if (mf != NULL)
2601 		free(mf, M_DEVBUF);
2602 	return (rv);
2603 }
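/*
 * Editor's note: the shape of an IOPIOCPT request as validated above:
 * the frame must not exceed the IOP's frame size, at most
 * IOP_MAX_MSG_XFERS buffers of at most IOP_MAX_XFER bytes each, and a
 * timeout between 1 and 300 seconds.  A user-space sketch (`fd' open on
 * the iop device; frame contents elided):
 */
#if 0	/* illustrative user-space code, not kernel source */
	struct ioppt pt;
	u_int32_t mf[32], reply[32];

	/* ... build an I2O message frame in mf ... */
	pt.pt_msg = mf;
	pt.pt_msglen = sizeof(mf);	/* must fit the IOP's frame size */
	pt.pt_reply = reply;
	pt.pt_replylen = sizeof(reply);
	pt.pt_timo = 10000;		/* 10s, within [1000, 300000] ms */
	pt.pt_nbufs = 0;		/* no data buffers */
	if (ioctl(fd, IOPIOCPT, &pt) < 0)
		err(1, "IOPIOCPT");
#endif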
2604