/*	$NetBSD: iop.c,v 1.87 2015/08/16 19:21:33 msaitoh Exp $	*/

/*-
 * Copyright (c) 2000, 2001, 2002, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Support for I2O IOPs (intelligent I/O processors).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: iop.c,v 1.87 2015/08/16 19:21:33 msaitoh Exp $");

#include "iop.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/ioctl.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kthread.h>
#include <sys/kauth.h>
#include <sys/bus.h>

#include <dev/i2o/i2o.h>
#include <dev/i2o/iopio.h>
#include <dev/i2o/iopreg.h>
#include <dev/i2o/iopvar.h>

#include "locators.h"

#define POLL(ms, cond)				\
do {						\
	int xi;					\
	for (xi = (ms) * 10; xi; xi--) {	\
		if (cond)			\
			break;			\
		DELAY(100);			\
	}					\
} while (/* CONSTCOND */0);
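
/*
 * POLL() busy-waits for up to `ms' milliseconds, re-evaluating `cond'
 * every 100us.  `cond' may be a comma expression with side effects;
 * iop_reset() below uses this to sync DMA before each test, e.g.
 * POLL(2500, (bus_dmamap_sync(...), *sw != 0)).
 */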

#ifdef I2ODEBUG
#define DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#define IOP_ICTXHASH_NBUCKETS	16
#define	IOP_ICTXHASH(ictx)	(&iop_ictxhashtbl[(ictx) & iop_ictxhash])

#define	IOP_MAX_SEGS	(((IOP_MAX_XFER + PAGE_SIZE - 1) / PAGE_SIZE) + 1)

#define	IOP_TCTX_SHIFT	12
#define	IOP_TCTX_MASK	((1 << IOP_TCTX_SHIFT) - 1)
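
/*
 * Transaction context layout (see iop_msg_alloc() and iop_handle_reply()):
 * the low IOP_TCTX_SHIFT bits index the message wrapper in sc_ims, and the
 * remaining upper bits carry a generation number, which lets stale or
 * corrupt tctx values in reply frames be detected.
 */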

static LIST_HEAD(, iop_initiator) *iop_ictxhashtbl;
static u_long	iop_ictxhash;
static void	*iop_sdh;
static struct	i2o_systab *iop_systab;
static int	iop_systab_size;

extern struct cfdriver iop_cd;

dev_type_open(iopopen);
dev_type_close(iopclose);
dev_type_ioctl(iopioctl);

const struct cdevsw iop_cdevsw = {
	.d_open = iopopen,
	.d_close = iopclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = iopioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER,
};

#define	IC_CONFIGURE	0x01
#define	IC_PRIORITY	0x02

static struct iop_class {
	u_short	ic_class;
	u_short	ic_flags;
	const char *ic_caption;
} const iop_class[] = {
	{
		I2O_CLASS_EXECUTIVE,
		0,
		"executive"
	},
	{
		I2O_CLASS_DDM,
		0,
		"device driver module"
	},
	{
		I2O_CLASS_RANDOM_BLOCK_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"random block storage"
	},
	{
		I2O_CLASS_SEQUENTIAL_STORAGE,
		IC_CONFIGURE | IC_PRIORITY,
		"sequential storage"
	},
	{
		I2O_CLASS_LAN,
		IC_CONFIGURE | IC_PRIORITY,
		"LAN port"
	},
	{
		I2O_CLASS_WAN,
		IC_CONFIGURE | IC_PRIORITY,
		"WAN port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PORT,
		IC_CONFIGURE,
		"fibrechannel port"
	},
	{
		I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL,
		0,
		"fibrechannel peripheral"
	},
	{
		I2O_CLASS_SCSI_PERIPHERAL,
		0,
		"SCSI peripheral"
	},
	{
		I2O_CLASS_ATE_PORT,
		IC_CONFIGURE,
		"ATE port"
	},
	{
		I2O_CLASS_ATE_PERIPHERAL,
		0,
		"ATE peripheral"
	},
	{
		I2O_CLASS_FLOPPY_CONTROLLER,
		IC_CONFIGURE,
		"floppy controller"
	},
	{
		I2O_CLASS_FLOPPY_DEVICE,
		0,
		"floppy device"
	},
	{
		I2O_CLASS_BUS_ADAPTER_PORT,
		IC_CONFIGURE,
		"bus adapter port"
	},
};
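
/*
 * Only classes flagged IC_CONFIGURE are matched against child drivers;
 * iop_reconfigure() makes two passes over the LCT, attaching devices in
 * classes that are also flagged IC_PRIORITY before the rest.
 */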

#ifdef I2ODEBUG
static const char * const iop_status[] = {
	"success",
	"abort (dirty)",
	"abort (no data transfer)",
	"abort (partial transfer)",
	"error (dirty)",
	"error (no data transfer)",
	"error (partial transfer)",
	"undefined error code",
	"process abort (dirty)",
	"process abort (no data transfer)",
	"process abort (partial transfer)",
	"transaction error",
};
#endif

static inline u_int32_t	iop_inl(struct iop_softc *, int);
static inline void	iop_outl(struct iop_softc *, int, u_int32_t);

static inline u_int32_t	iop_inl_msg(struct iop_softc *, int);
static inline void	iop_outl_msg(struct iop_softc *, int, u_int32_t);

static void	iop_config_interrupts(device_t);
static void	iop_configure_devices(struct iop_softc *, int, int);
static void	iop_devinfo(int, char *, size_t);
static int	iop_print(void *, const char *);
static void	iop_shutdown(void *);

static void	iop_adjqparam(struct iop_softc *, int);
static int	iop_handle_reply(struct iop_softc *, u_int32_t);
static int	iop_hrt_get(struct iop_softc *);
static int	iop_hrt_get0(struct iop_softc *, struct i2o_hrt *, int);
static void	iop_intr_event(device_t, struct iop_msg *, void *);
static int	iop_lct_get0(struct iop_softc *, struct i2o_lct *, int,
			     u_int32_t);
static void	iop_msg_poll(struct iop_softc *, struct iop_msg *, int);
static void	iop_msg_wait(struct iop_softc *, struct iop_msg *, int);
static int	iop_ofifo_init(struct iop_softc *);
static int	iop_passthrough(struct iop_softc *, struct ioppt *,
				struct proc *);
static void	iop_reconf_thread(void *);
static void	iop_release_mfa(struct iop_softc *, u_int32_t);
static int	iop_reset(struct iop_softc *);
static int	iop_sys_enable(struct iop_softc *);
static int	iop_systab_set(struct iop_softc *);
static void	iop_tfn_print(struct iop_softc *, struct i2o_fault_notify *);

#ifdef I2ODEBUG
static void	iop_reply_print(struct iop_softc *, struct i2o_reply *);
#endif

static inline u_int32_t
iop_inl(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, off));
}

static inline void
iop_outl(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}

static inline u_int32_t
iop_inl_msg(struct iop_softc *sc, int off)
{

	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE | BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_msg_iot, sc->sc_msg_ioh, off));
}

static inline void
iop_outl_msg(struct iop_softc *sc, int off, u_int32_t val)
{

	bus_space_write_4(sc->sc_msg_iot, sc->sc_msg_ioh, off, val);
	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, off, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
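
/*
 * Note the barrier pairing in the accessors above: reads are preceded by
 * a full read/write barrier, while writes are followed by a write-only
 * barrier.  Presumably this keeps FIFO reads ordered after any earlier
 * register writes, and pushes writes out to the IOP promptly.
 */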

/*
 * Initialise the IOP and our interface.
 */
void
iop_init(struct iop_softc *sc, const char *intrstr)
{
	struct iop_msg *im;
	int rv, i, j, state, nsegs;
	u_int32_t mask;
	char ident[64];

	state = 0;

	printf("I2O adapter");

	mutex_init(&sc->sc_intrlock, MUTEX_DEFAULT, IPL_VM);
	mutex_init(&sc->sc_conflock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&sc->sc_confcv, "iopconf");

	if (iop_ictxhashtbl == NULL) {
		iop_ictxhashtbl = hashinit(IOP_ICTXHASH_NBUCKETS, HASH_LIST,
		    true, &iop_ictxhash);
	}

	/* Disable interrupts at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask | IOP_INTR_OFIFO);

	/* Allocate a scratch DMA map for small miscellaneous shared data. */
	if (bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1, PAGE_SIZE, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &sc->sc_scr_dmamap) != 0) {
		aprint_error_dev(sc->sc_dev, "cannot create scratch dmamap\n");
		return;
	}

	if (bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, PAGE_SIZE, 0,
	    sc->sc_scr_seg, 1, &nsegs, BUS_DMA_NOWAIT) != 0) {
		aprint_error_dev(sc->sc_dev, "cannot alloc scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamem_map(sc->sc_dmat, sc->sc_scr_seg, nsegs, PAGE_SIZE,
	    &sc->sc_scr, 0)) {
		aprint_error_dev(sc->sc_dev, "cannot map scratch dmamem\n");
		goto bail_out;
	}
	state++;

	if (bus_dmamap_load(sc->sc_dmat, sc->sc_scr_dmamap, sc->sc_scr,
	    PAGE_SIZE, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(sc->sc_dev, "cannot load scratch dmamap\n");
		goto bail_out;
	}
	state++;

#ifdef I2ODEBUG
	/* So that our debug checks don't choke. */
	sc->sc_framesize = 128;
#endif

	/* Avoid syncing the reply map until it's set up. */
	sc->sc_curib = 0x123;

	/* Reset the adapter and request status. */
	if ((rv = iop_reset(sc)) != 0) {
		aprint_error_dev(sc->sc_dev, "not responding (reset)\n");
		goto bail_out;
	}

	if ((rv = iop_status_get(sc, 1)) != 0) {
		aprint_error_dev(sc->sc_dev, "not responding (get status)\n");
		goto bail_out;
	}

	sc->sc_flags |= IOP_HAVESTATUS;
	iop_strvis(sc, sc->sc_status.productid, sizeof(sc->sc_status.productid),
	    ident, sizeof(ident));
	printf(" <%s>\n", ident);

#ifdef I2ODEBUG
	printf("%s: orgid=0x%04x version=%d\n",
	    device_xname(sc->sc_dev),
	    le16toh(sc->sc_status.orgid),
	    (le32toh(sc->sc_status.segnumber) >> 12) & 15);
	printf("%s: type want have cbase\n", device_xname(sc->sc_dev));
	printf("%s: mem  %04x %04x %08x\n", device_xname(sc->sc_dev),
	    le32toh(sc->sc_status.desiredprivmemsize),
	    le32toh(sc->sc_status.currentprivmemsize),
	    le32toh(sc->sc_status.currentprivmembase));
	printf("%s: i/o  %04x %04x %08x\n", device_xname(sc->sc_dev),
	    le32toh(sc->sc_status.desiredpriviosize),
	    le32toh(sc->sc_status.currentpriviosize),
	    le32toh(sc->sc_status.currentpriviobase));
#endif

	sc->sc_maxob = le32toh(sc->sc_status.maxoutboundmframes);
	if (sc->sc_maxob > IOP_MAX_OUTBOUND)
		sc->sc_maxob = IOP_MAX_OUTBOUND;
	sc->sc_maxib = le32toh(sc->sc_status.maxinboundmframes);
	if (sc->sc_maxib > IOP_MAX_INBOUND)
		sc->sc_maxib = IOP_MAX_INBOUND;
	sc->sc_framesize = le16toh(sc->sc_status.inboundmframesize) << 2;
	if (sc->sc_framesize > IOP_MAX_MSG_SIZE)
		sc->sc_framesize = IOP_MAX_MSG_SIZE;
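	/*
	 * inboundmframesize is reported in 32-bit words (hence the << 2
	 * above); sc_framesize is kept in bytes.
	 */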

#if defined(I2ODEBUG) || defined(DIAGNOSTIC)
	if (sc->sc_framesize < IOP_MIN_MSG_SIZE) {
		aprint_error_dev(sc->sc_dev, "frame size too small (%d)\n",
		    sc->sc_framesize);
		goto bail_out;
	}
#endif

	/* Allocate message wrappers. */
	im = malloc(sizeof(*im) * sc->sc_maxib, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (im == NULL) {
		aprint_error_dev(sc->sc_dev, "memory allocation failure\n");
		goto bail_out;
	}
	state++;
	sc->sc_ims = im;
	SLIST_INIT(&sc->sc_im_freelist);

	for (i = 0; i < sc->sc_maxib; i++, im++) {
		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &im->im_xfer[0].ix_map);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev,
			    "couldn't create dmamap (%d)\n", rv);
			goto bail_out3;
		}

		im->im_tctx = i;
		SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
		cv_init(&im->im_cv, "iopmsg");
	}

	/* Initialise the IOP's outbound FIFO. */
	if (iop_ofifo_init(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to init outbound FIFO\n");
		goto bail_out3;
	}

	/*
	 * Defer further configuration until (a) interrupts are working and
	 * (b) we have enough information to build the system table.
	 */
	config_interrupts(sc->sc_dev, iop_config_interrupts);

	/* Configure shutdown hook before we start any device activity. */
	if (iop_sdh == NULL)
		iop_sdh = shutdownhook_establish(iop_shutdown, NULL);

	/* Ensure interrupts are enabled at the IOP. */
	mask = iop_inl(sc, IOP_REG_INTR_MASK);
	iop_outl(sc, IOP_REG_INTR_MASK, mask & ~IOP_INTR_OFIFO);

	if (intrstr != NULL)
		printf("%s: interrupting at %s\n", device_xname(sc->sc_dev),
		    intrstr);

#ifdef I2ODEBUG
	printf("%s: queue depths: inbound %d/%d, outbound %d/%d\n",
	    device_xname(sc->sc_dev), sc->sc_maxib,
	    le32toh(sc->sc_status.maxinboundmframes),
	    sc->sc_maxob, le32toh(sc->sc_status.maxoutboundmframes));
#endif

	return;

 bail_out3:
	if (state > 3) {
		for (j = 0; j < i; j++)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_ims[j].im_xfer[0].ix_map);
		free(sc->sc_ims, M_DEVBUF);
	}
 bail_out:
	if (state > 2)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_scr_dmamap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, sc->sc_scr, PAGE_SIZE);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, sc->sc_scr_seg, nsegs);
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_scr_dmamap);
}

/*
 * Perform autoconfiguration tasks.
 */
static void
iop_config_interrupts(device_t self)
{
	struct iop_attach_args ia;
	struct iop_softc *sc, *iop;
	struct i2o_systab_entry *ste;
	int rv, i, niop;
	int locs[IOPCF_NLOCS];

	sc = device_private(self);
	mutex_enter(&sc->sc_conflock);

	LIST_INIT(&sc->sc_iilist);

	printf("%s: configuring...\n", device_xname(sc->sc_dev));

	if (iop_hrt_get(sc) != 0) {
		printf("%s: unable to retrieve HRT\n", device_xname(sc->sc_dev));
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Build the system table.
	 */
	if (iop_systab == NULL) {
		for (i = 0, niop = 0; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup_private(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;
			if (iop_status_get(iop, 1) != 0) {
				aprint_error_dev(sc->sc_dev, "unable to retrieve status\n");
				iop->sc_flags &= ~IOP_HAVESTATUS;
				continue;
			}
			niop++;
		}
		if (niop == 0) {
			mutex_exit(&sc->sc_conflock);
			return;
		}

		i = sizeof(struct i2o_systab_entry) * (niop - 1) +
		    sizeof(struct i2o_systab);
		iop_systab_size = i;
		iop_systab = malloc(i, M_DEVBUF, M_NOWAIT|M_ZERO);

		iop_systab->numentries = niop;
		iop_systab->version = I2O_VERSION_11;

		for (i = 0, ste = iop_systab->entry; i < iop_cd.cd_ndevs; i++) {
			if ((iop = device_lookup_private(&iop_cd, i)) == NULL)
				continue;
			if ((iop->sc_flags & IOP_HAVESTATUS) == 0)
				continue;

			ste->orgid = iop->sc_status.orgid;
			ste->iopid = device_unit(iop->sc_dev) + 2;
			ste->segnumber =
			    htole32(le32toh(iop->sc_status.segnumber) & ~4095);
			ste->iopcaps = iop->sc_status.iopcaps;
			ste->inboundmsgframesize =
			    iop->sc_status.inboundmframesize;
			ste->inboundmsgportaddresslow =
			    htole32(iop->sc_memaddr + IOP_REG_IFIFO);
			ste++;
		}
	}

	/*
	 * Post the system table to the IOP and bring it to the OPERATIONAL
	 * state.
	 */
	if (iop_systab_set(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to set system table\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}
	if (iop_sys_enable(sc) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to enable system\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Set up an event handler for this IOP.
	 */
	sc->sc_eventii.ii_dv = self;
	sc->sc_eventii.ii_intr = iop_intr_event;
	sc->sc_eventii.ii_flags = II_NOTCTX | II_UTILITY;
	sc->sc_eventii.ii_tid = I2O_TID_IOP;
	iop_initiator_register(sc, &sc->sc_eventii);

	rv = iop_util_eventreg(sc, &sc->sc_eventii,
	    I2O_EVENT_EXEC_RESOURCE_LIMITS |
	    I2O_EVENT_EXEC_CONNECTION_FAIL |
	    I2O_EVENT_EXEC_ADAPTER_FAULT |
	    I2O_EVENT_EXEC_POWER_FAIL |
	    I2O_EVENT_EXEC_RESET_PENDING |
	    I2O_EVENT_EXEC_RESET_IMMINENT |
	    I2O_EVENT_EXEC_HARDWARE_FAIL |
	    I2O_EVENT_EXEC_XCT_CHANGE |
	    I2O_EVENT_EXEC_DDM_AVAILIBILITY |
	    I2O_EVENT_GEN_DEVICE_RESET |
	    I2O_EVENT_GEN_STATE_CHANGE |
	    I2O_EVENT_GEN_GENERAL_WARNING);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev, "unable to register for events\n");
		mutex_exit(&sc->sc_conflock);
		return;
	}

	/*
	 * Attempt to match and attach a product-specific extension.
	 */
	ia.ia_class = I2O_CLASS_ANY;
	ia.ia_tid = I2O_TID_IOP;
	locs[IOPCF_TID] = I2O_TID_IOP;
	config_found_sm_loc(self, "iop", locs, &ia, iop_print,
		config_stdsubmatch);

	/*
	 * Start device configuration.
	 */
	if ((rv = iop_reconfigure(sc, 0)) == -1)
		aprint_error_dev(sc->sc_dev, "configure failed (%d)\n", rv);


	sc->sc_flags |= IOP_ONLINE;
	rv = kthread_create(PRI_NONE, 0, NULL, iop_reconf_thread, sc,
	    &sc->sc_reconf_thread, "%s", device_xname(sc->sc_dev));
	mutex_exit(&sc->sc_conflock);
	if (rv != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create reconfiguration thread (%d)\n", rv);
		return;
	}
}

/*
 * Reconfiguration thread; listens for LCT change notification, and
 * initiates re-configuration if received.
 */
static void
iop_reconf_thread(void *cookie)
{
	struct iop_softc *sc;
	struct i2o_lct lct;
	u_int32_t chgind;
	int rv;

	sc = cookie;
	chgind = sc->sc_chgind + 1;

	for (;;) {
		DPRINTF(("%s: async reconfig: requested 0x%08x\n",
		    device_xname(sc->sc_dev), chgind));

		rv = iop_lct_get0(sc, &lct, sizeof(lct), chgind);

		DPRINTF(("%s: async reconfig: notified (0x%08x, %d)\n",
		    device_xname(sc->sc_dev), le32toh(lct.changeindicator), rv));

		mutex_enter(&sc->sc_conflock);
		if (rv == 0) {
			iop_reconfigure(sc, le32toh(lct.changeindicator));
			chgind = sc->sc_chgind + 1;
		}
		(void)cv_timedwait(&sc->sc_confcv, &sc->sc_conflock, hz * 5);
		mutex_exit(&sc->sc_conflock);
	}
}

/*
 * Reconfigure: find new and removed devices.
 */
int
iop_reconfigure(struct iop_softc *sc, u_int chgind)
{
	struct iop_msg *im;
	struct i2o_hba_bus_scan mf;
	struct i2o_lct_entry *le;
	struct iop_initiator *ii, *nextii;
	int rv, tid, i;

	KASSERT(mutex_owned(&sc->sc_conflock));

	/*
	 * If the reconfiguration request isn't the result of LCT change
	 * notification, then be more thorough: ask all bus ports to scan
	 * their busses.  Wait up to 5 minutes for each bus port to complete
	 * the request.
	 */
	if (chgind == 0) {
		if ((rv = iop_lct_get(sc)) != 0) {
			DPRINTF(("iop_reconfigure: unable to read LCT\n"));
			return (rv);
		}

		le = sc->sc_lct->entry;
		for (i = 0; i < sc->sc_nlctent; i++, le++) {
			if ((le16toh(le->classid) & 4095) !=
			    I2O_CLASS_BUS_ADAPTER_PORT)
				continue;
			tid = le16toh(le->localtid) & 4095;

			im = iop_msg_alloc(sc, IM_WAIT);

			mf.msgflags = I2O_MSGFLAGS(i2o_hba_bus_scan);
			mf.msgfunc = I2O_MSGFUNC(tid, I2O_HBA_BUS_SCAN);
			mf.msgictx = IOP_ICTX;
			mf.msgtctx = im->im_tctx;

			DPRINTF(("%s: scanning bus %d\n", device_xname(sc->sc_dev),
			    tid));

			rv = iop_msg_post(sc, im, &mf, 5*60*1000);
			iop_msg_free(sc, im);
#ifdef I2ODEBUG
			if (rv != 0)
				aprint_error_dev(sc->sc_dev, "bus scan failed\n");
#endif
		}
	} else if (chgind <= sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged (async)\n", device_xname(sc->sc_dev)));
		return (0);
	}

	/* Re-read the LCT and determine if it has changed. */
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
		return (rv);
	}
	DPRINTF(("%s: %d LCT entries\n", device_xname(sc->sc_dev), sc->sc_nlctent));

	chgind = le32toh(sc->sc_lct->changeindicator);
	if (chgind == sc->sc_chgind) {
		DPRINTF(("%s: LCT unchanged\n", device_xname(sc->sc_dev)));
		return (0);
	}
	DPRINTF(("%s: LCT changed\n", device_xname(sc->sc_dev)));
	sc->sc_chgind = chgind;

	if (sc->sc_tidmap != NULL)
		free(sc->sc_tidmap, M_DEVBUF);
	sc->sc_tidmap = malloc(sc->sc_nlctent * sizeof(struct iop_tidmap),
	    M_DEVBUF, M_NOWAIT|M_ZERO);

	/* Allow 1 queued command per device while we're configuring. */
	iop_adjqparam(sc, 1);

	/*
	 * Match and attach child devices.  We configure high-level devices
	 * first so that any claims will propagate throughout the LCT,
	 * hopefully masking off aliased devices as a result.
	 *
	 * Re-reading the LCT at this point is a little dangerous, but we'll
	 * trust the IOP (and the operator) to behave themselves...
	 */
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE | IC_PRIORITY);
	if ((rv = iop_lct_get(sc)) != 0) {
		DPRINTF(("iop_reconfigure: unable to re-read LCT\n"));
	}
	iop_configure_devices(sc, IC_CONFIGURE | IC_PRIORITY,
	    IC_CONFIGURE);

	for (ii = LIST_FIRST(&sc->sc_iilist); ii != NULL; ii = nextii) {
		nextii = LIST_NEXT(ii, ii_list);

		/* Detach devices that were configured, but are now gone. */
		for (i = 0; i < sc->sc_nlctent; i++)
			if (ii->ii_tid == sc->sc_tidmap[i].it_tid)
				break;
		if (i == sc->sc_nlctent ||
		    (sc->sc_tidmap[i].it_flags & IT_CONFIGURED) == 0) {
			config_detach(ii->ii_dv, DETACH_FORCE);
			continue;
		}

		/*
		 * Tell initiators that existed before the re-configuration
		 * to re-configure.
		 */
		if (ii->ii_reconfig == NULL)
			continue;
		if ((rv = (*ii->ii_reconfig)(ii->ii_dv)) != 0)
			aprint_error_dev(sc->sc_dev, "%s failed reconfigure (%d)\n",
			    device_xname(ii->ii_dv), rv);
	}

	/* Re-adjust queue parameters and return. */
	if (sc->sc_nii != 0)
		iop_adjqparam(sc, (sc->sc_maxib - sc->sc_nuii - IOP_MF_RESERVE)
		    / sc->sc_nii);

	return (0);
}

/*
 * Configure I2O devices into the system.
 */
static void
iop_configure_devices(struct iop_softc *sc, int mask, int maskval)
{
	struct iop_attach_args ia;
	struct iop_initiator *ii;
	const struct i2o_lct_entry *le;
	device_t dv;
	int i, j, nent;
	u_int usertid;
	int locs[IOPCF_NLOCS];

	nent = sc->sc_nlctent;
	for (i = 0, le = sc->sc_lct->entry; i < nent; i++, le++) {
		sc->sc_tidmap[i].it_tid = le16toh(le->localtid) & 4095;

		/* Ignore the device if it's in use. */
		usertid = le32toh(le->usertid) & 4095;
		if (usertid != I2O_TID_NONE && usertid != I2O_TID_HOST)
			continue;

		ia.ia_class = le16toh(le->classid) & 4095;
		ia.ia_tid = sc->sc_tidmap[i].it_tid;

		/* Ignore uninteresting devices. */
		for (j = 0; j < sizeof(iop_class) / sizeof(iop_class[0]); j++)
			if (iop_class[j].ic_class == ia.ia_class)
				break;
		if (j < sizeof(iop_class) / sizeof(iop_class[0]) &&
		    (iop_class[j].ic_flags & mask) != maskval)
			continue;

		/*
		 * Try to configure the device only if it's not already
		 * configured.
		 */
		LIST_FOREACH(ii, &sc->sc_iilist, ii_list) {
			if (ia.ia_tid == ii->ii_tid) {
				sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
				strcpy(sc->sc_tidmap[i].it_dvname,
				    device_xname(ii->ii_dv));
				break;
			}
		}
		if (ii != NULL)
			continue;

		locs[IOPCF_TID] = ia.ia_tid;

		dv = config_found_sm_loc(sc->sc_dev, "iop", locs, &ia,
					 iop_print, config_stdsubmatch);
		if (dv != NULL) {
			sc->sc_tidmap[i].it_flags |= IT_CONFIGURED;
			strcpy(sc->sc_tidmap[i].it_dvname, device_xname(dv));
		}
	}
}

/*
 * Adjust queue parameters for all child devices.
 */
static void
iop_adjqparam(struct iop_softc *sc, int mpi)
{
	struct iop_initiator *ii;

	LIST_FOREACH(ii, &sc->sc_iilist, ii_list)
		if (ii->ii_adjqparam != NULL)
			(*ii->ii_adjqparam)(ii->ii_dv, mpi);
}

static void
iop_devinfo(int class, char *devinfo, size_t l)
{
	int i;

	for (i = 0; i < sizeof(iop_class) / sizeof(iop_class[0]); i++)
		if (class == iop_class[i].ic_class)
			break;

	if (i == sizeof(iop_class) / sizeof(iop_class[0]))
		snprintf(devinfo, l, "device (class 0x%x)", class);
	else
		strlcpy(devinfo, iop_class[i].ic_caption, l);
}

static int
iop_print(void *aux, const char *pnp)
{
	struct iop_attach_args *ia;
	char devinfo[256];

	ia = aux;

	if (pnp != NULL) {
		iop_devinfo(ia->ia_class, devinfo, sizeof(devinfo));
		aprint_normal("%s at %s", devinfo, pnp);
	}
	aprint_normal(" tid %d", ia->ia_tid);
	return (UNCONF);
}

/*
 * Shut down all configured IOPs.
 */
static void
iop_shutdown(void *junk)
{
	struct iop_softc *sc;
	int i;

	printf("shutting down iop devices...");

	for (i = 0; i < iop_cd.cd_ndevs; i++) {
		if ((sc = device_lookup_private(&iop_cd, i)) == NULL)
			continue;
		if ((sc->sc_flags & IOP_ONLINE) == 0)
			continue;

		iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_SYS_QUIESCE, IOP_ICTX,
		    0, 5000);

		if (le16toh(sc->sc_status.orgid) != I2O_ORG_AMI) {
			/*
			 * Some AMI firmware revisions will go to sleep and
			 * never come back after this.
			 */
			iop_simple_cmd(sc, I2O_TID_IOP, I2O_EXEC_IOP_CLEAR,
			    IOP_ICTX, 0, 1000);
		}
	}

	/* Wait.  Some boards could still be flushing, stupidly enough. */
	delay(5000*1000);
	printf(" done\n");
}

/*
 * Retrieve IOP status.
 */
int
iop_status_get(struct iop_softc *sc, int nosleep)
{
	struct i2o_exec_status_get mf;
	struct i2o_status *st;
	paddr_t pa;
	int rv, i;

	pa = sc->sc_scr_dmamap->dm_segs[0].ds_addr;
	st = (struct i2o_status *)sc->sc_scr;

	mf.msgflags = I2O_MSGFLAGS(i2o_exec_status_get);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_STATUS_GET);
	mf.reserved[0] = 0;
	mf.reserved[1] = 0;
	mf.reserved[2] = 0;
	mf.reserved[3] = 0;
	mf.addrlow = (u_int32_t)pa;
	mf.addrhigh = (u_int32_t)((u_int64_t)pa >> 32);
	mf.length = sizeof(sc->sc_status);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_PREWRITE);
	memset(st, 0, sizeof(*st));
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*st),
	    BUS_DMASYNC_POSTWRITE);

	if ((rv = iop_post(sc, (u_int32_t *)&mf)) != 0)
		return (rv);

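	/*
	 * The IOP indicates completion by setting the sync byte in the
	 * status buffer to 0xff; poll for up to ~10 seconds (100 passes,
	 * 100ms apart), sleeping between passes where we're allowed to.
	 */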
	for (i = 100; i != 0; i--) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0,
		    sizeof(*st), BUS_DMASYNC_POSTREAD);
		if (st->syncbyte == 0xff)
			break;
		if (nosleep)
			DELAY(100*1000);
		else
			kpause("iopstat", false, hz / 10, NULL);
	}

	if (st->syncbyte != 0xff) {
		aprint_error_dev(sc->sc_dev, "STATUS_GET timed out\n");
		rv = EIO;
	} else {
		memcpy(&sc->sc_status, st, sizeof(sc->sc_status));
		rv = 0;
	}

	return (rv);
}

/*
 * Initialize and populate the IOP's outbound FIFO.
 */
static int
iop_ofifo_init(struct iop_softc *sc)
{
	bus_addr_t addr;
	bus_dma_segment_t seg;
	struct i2o_exec_outbound_init *mf;
	int i, rseg, rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)], *sw;

	sw = (u_int32_t *)sc->sc_scr;

	mf = (struct i2o_exec_outbound_init *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_outbound_init);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_OUTBOUND_INIT);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = 0;
	mf->pagesize = PAGE_SIZE;
	mf->flags = IOP_INIT_CODE | ((sc->sc_framesize >> 2) << 16);

	/*
	 * The I2O spec says that there are two SGLs: one for the status
	 * word, and one for a list of discarded MFAs.  It continues to say
	 * that if you don't want to get the list of MFAs, an IGNORE SGL is
	 * necessary; this isn't the case (and is in fact a bad thing).
	 */
	mb[sizeof(*mf) / sizeof(u_int32_t) + 0] = sizeof(*sw) |
	    I2O_SGL_SIMPLE | I2O_SGL_END_BUFFER | I2O_SGL_END;
	mb[sizeof(*mf) / sizeof(u_int32_t) + 1] =
	    (u_int32_t)sc->sc_scr_dmamap->dm_segs[0].ds_addr;
	mb[0] += 2 << 16;
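	/*
	 * The message size, in 32-bit words, is kept in the upper 16 bits
	 * of the first message word; the increment above accounts for the
	 * two SGL words just appended.
	 */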

	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTWRITE);
	*sw = 0;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	if ((rv = iop_post(sc, mb)) != 0)
		return (rv);

	POLL(5000,
	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD),
	    *sw == htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)));

	if (*sw != htole32(I2O_EXEC_OUTBOUND_INIT_COMPLETE)) {
		aprint_error_dev(sc->sc_dev, "outbound FIFO init failed (%d)\n",
		    le32toh(*sw));
		return (EIO);
	}

	/* Allocate DMA safe memory for the reply frames. */
	if (sc->sc_rep_phys == 0) {
		sc->sc_rep_size = sc->sc_maxob * sc->sc_framesize;

		rv = bus_dmamem_alloc(sc->sc_dmat, sc->sc_rep_size, PAGE_SIZE,
		    0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "DMA alloc = %d\n", rv);
			return (rv);
		}

		rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, sc->sc_rep_size,
		    &sc->sc_rep, BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "DMA map = %d\n", rv);
			return (rv);
		}

		rv = bus_dmamap_create(sc->sc_dmat, sc->sc_rep_size, 1,
		    sc->sc_rep_size, 0, BUS_DMA_NOWAIT, &sc->sc_rep_dmamap);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "DMA create = %d\n", rv);
			return (rv);
		}

		rv = bus_dmamap_load(sc->sc_dmat, sc->sc_rep_dmamap,
		    sc->sc_rep, sc->sc_rep_size, NULL, BUS_DMA_NOWAIT);
		if (rv != 0) {
			aprint_error_dev(sc->sc_dev, "DMA load = %d\n", rv);
			return (rv);
		}

		sc->sc_rep_phys = sc->sc_rep_dmamap->dm_segs[0].ds_addr;

		/* Now safe to sync the reply map. */
		sc->sc_curib = 0;
	}

	/* Populate the outbound FIFO. */
	for (i = sc->sc_maxob, addr = sc->sc_rep_phys; i != 0; i--) {
		iop_outl(sc, IOP_REG_OFIFO, (u_int32_t)addr);
		addr += sc->sc_framesize;
	}

	return (0);
}

/*
 * Read the specified number of bytes from the IOP's hardware resource table.
 */
static int
iop_hrt_get0(struct iop_softc *sc, struct i2o_hrt *hrt, int size)
{
	struct iop_msg *im;
	int rv;
	struct i2o_exec_hrt_get *mf;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	mf = (struct i2o_exec_hrt_get *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_hrt_get);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_HRT_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;

	iop_msg_map(sc, im, mb, hrt, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's hardware resource table.
 */
static int
iop_hrt_get(struct iop_softc *sc)
{
	struct i2o_hrt hrthdr, *hrt;
	int size, rv;

	rv = iop_hrt_get0(sc, &hrthdr, sizeof(hrthdr));
	if (rv != 0)
		return (rv);

	DPRINTF(("%s: %d hrt entries\n", device_xname(sc->sc_dev),
	    le16toh(hrthdr.numentries)));

	size = sizeof(struct i2o_hrt) +
	    (le16toh(hrthdr.numentries) - 1) * sizeof(struct i2o_hrt_entry);
	hrt = (struct i2o_hrt *)malloc(size, M_DEVBUF, M_NOWAIT);

	if ((rv = iop_hrt_get0(sc, hrt, size)) != 0) {
		free(hrt, M_DEVBUF);
		return (rv);
	}

	if (sc->sc_hrt != NULL)
		free(sc->sc_hrt, M_DEVBUF);
	sc->sc_hrt = hrt;
	return (0);
}

/*
 * Request the specified number of bytes from the IOP's logical
 * configuration table.  If a change indicator is specified, this is a
 * verbatim notification request, so the caller must be prepared to wait
 * indefinitely.
 */
static int
iop_lct_get0(struct iop_softc *sc, struct i2o_lct *lct, int size,
	     u_int32_t chgind)
{
	struct iop_msg *im;
	struct i2o_exec_lct_notify *mf;
	int rv;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, IM_WAIT);
	memset(lct, 0, size);

	mf = (struct i2o_exec_lct_notify *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_exec_lct_notify);
	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_LCT_NOTIFY);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->classid = I2O_CLASS_ANY;
	mf->changeindicator = chgind;

#ifdef I2ODEBUG
	printf("iop_lct_get0: reading LCT");
	if (chgind != 0)
		printf(" (async)");
	printf("\n");
#endif

	iop_msg_map(sc, im, mb, lct, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (chgind == 0 ? 120*1000 : 0));
	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Read the IOP's logical configuration table.
 */
int
iop_lct_get(struct iop_softc *sc)
{
	int esize, size, rv;
	struct i2o_lct *lct;

	esize = le32toh(sc->sc_status.expectedlctsize);
	lct = (struct i2o_lct *)malloc(esize, M_DEVBUF, M_WAITOK);
	if (lct == NULL)
		return (ENOMEM);

	if ((rv = iop_lct_get0(sc, lct, esize, 0)) != 0) {
		free(lct, M_DEVBUF);
		return (rv);
	}

	size = le16toh(lct->tablesize) << 2;
	if (esize != size) {
		free(lct, M_DEVBUF);
		lct = (struct i2o_lct *)malloc(size, M_DEVBUF, M_WAITOK);
		if (lct == NULL)
			return (ENOMEM);

		if ((rv = iop_lct_get0(sc, lct, size, 0)) != 0) {
			free(lct, M_DEVBUF);
			return (rv);
		}
	}

	/* Swap in the new LCT. */
	if (sc->sc_lct != NULL)
		free(sc->sc_lct, M_DEVBUF);
	sc->sc_lct = lct;
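	/*
	 * Note that struct i2o_lct already includes space for the first
	 * entry, hence sizeof(struct i2o_lct_entry) is added back in when
	 * computing the entry count from the table size (which the IOP
	 * reports in 32-bit words).
	 */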
	sc->sc_nlctent = ((le16toh(sc->sc_lct->tablesize) << 2) -
	    sizeof(struct i2o_lct) + sizeof(struct i2o_lct_entry)) /
	    sizeof(struct i2o_lct_entry);
	return (0);
}

/*
 * Post a SYS_ENABLE message to the adapter.
 */
int
iop_sys_enable(struct iop_softc *sc)
{
	struct iop_msg *im;
	struct i2o_msg mf;
	int rv;

	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);

	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_ENABLE);
	mf.msgictx = IOP_ICTX;
	mf.msgtctx = im->im_tctx;

	rv = iop_msg_post(sc, im, &mf, 30000);
	if (rv == 0) {
		if ((im->im_flags & IM_FAIL) != 0)
			rv = ENXIO;
		else if (im->im_reqstatus == I2O_STATUS_SUCCESS ||
		    (im->im_reqstatus == I2O_STATUS_ERROR_NO_DATA_XFER &&
		    im->im_detstatus == I2O_DSC_INVALID_REQUEST))
			rv = 0;
		else
			rv = EIO;
	}

	iop_msg_free(sc, im);
	return (rv);
}

/*
 * Request the specified parameter group from the target.  If an initiator
 * is specified (a) don't wait for the operation to complete, but instead
 * let the initiator's interrupt handler deal with the reply and (b) place a
 * pointer to the parameter group op in the wrapper's `im_dvcontext' field.
 */
int
iop_field_get_all(struct iop_softc *sc, int tid, int group, void *buf,
		  int size, struct iop_initiator *ii)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	int rv;
	struct iop_pgop *pgop;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	im = iop_msg_alloc(sc, (ii == NULL ? IM_WAIT : 0) | IM_NOSTATUS);
	if ((pgop = malloc(sizeof(*pgop), M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}
	im->im_dvcontext = pgop;

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_GET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_GET);
	pgop->oat.fieldcount = htole16(0xffff);
	pgop->oat.group = htole16(group);

	memset(buf, 0, size);
	iop_msg_map(sc, im, mb, pgop, sizeof(*pgop), 1, NULL);
	iop_msg_map(sc, im, mb, buf, size, 0, NULL);
	rv = iop_msg_post(sc, im, mb, (ii == NULL ? 30000 : 0));

	/* Detect errors; let partial transfers count as success. */
	if (ii == NULL && rv == 0) {
		if (im->im_reqstatus == I2O_STATUS_ERROR_PARTIAL_XFER &&
		    im->im_detstatus == I2O_DSC_UNKNOWN_ERROR)
			rv = 0;
		else
			rv = (im->im_reqstatus != 0 ? EIO : 0);

		if (rv != 0)
			printf("%s: FIELD_GET failed for tid %d group %d\n",
			    device_xname(sc->sc_dev), tid, group);
	}

	if (ii == NULL || rv != 0) {
		iop_msg_unmap(sc, im);
		iop_msg_free(sc, im);
		free(pgop, M_DEVBUF);
	}

	return (rv);
}

/*
 * Set a single field in a scalar parameter group.
 */
int
iop_field_set(struct iop_softc *sc, int tid, int group, void *buf,
	      int size, int field)
{
	struct iop_msg *im;
	struct i2o_util_params_op *mf;
	struct iop_pgop *pgop;
	int rv, totsize;
	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];

	totsize = sizeof(*pgop) + size;

	im = iop_msg_alloc(sc, IM_WAIT);
	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
		iop_msg_free(sc, im);
		return (ENOMEM);
	}

	mf = (struct i2o_util_params_op *)mb;
	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
	mf->msgictx = IOP_ICTX;
	mf->msgtctx = im->im_tctx;
	mf->flags = 0;

	pgop->olh.count = htole16(1);
	pgop->olh.reserved = htole16(0);
	pgop->oat.operation = htole16(I2O_PARAMS_OP_FIELD_SET);
	pgop->oat.fieldcount = htole16(1);
	pgop->oat.group = htole16(group);
	pgop->oat.fields[0] = htole16(field);
	memcpy(pgop + 1, buf, size);

	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
	rv = iop_msg_post(sc, im, mb, 30000);
	if (rv != 0)
		aprint_error_dev(sc->sc_dev, "FIELD_SET failed for tid %d group %d\n",
		    tid, group);

	iop_msg_unmap(sc, im);
	iop_msg_free(sc, im);
	free(pgop, M_DEVBUF);
	return (rv);
}

/*
 * Delete all rows in a tabular parameter group.
1352  */
1353 int
iop_table_clear(struct iop_softc * sc,int tid,int group)1354 iop_table_clear(struct iop_softc *sc, int tid, int group)
1355 {
1356 	struct iop_msg *im;
1357 	struct i2o_util_params_op *mf;
1358 	struct iop_pgop pgop;
1359 	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1360 	int rv;
1361 
1362 	im = iop_msg_alloc(sc, IM_WAIT);
1363 
1364 	mf = (struct i2o_util_params_op *)mb;
1365 	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1366 	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1367 	mf->msgictx = IOP_ICTX;
1368 	mf->msgtctx = im->im_tctx;
1369 	mf->flags = 0;
1370 
1371 	pgop.olh.count = htole16(1);
1372 	pgop.olh.reserved = htole16(0);
1373 	pgop.oat.operation = htole16(I2O_PARAMS_OP_TABLE_CLEAR);
1374 	pgop.oat.fieldcount = htole16(0);
1375 	pgop.oat.group = htole16(group);
1376 	pgop.oat.fields[0] = htole16(0);
1377 
1378 	iop_msg_map(sc, im, mb, &pgop, sizeof(pgop), 1, NULL);
1379 	rv = iop_msg_post(sc, im, mb, 30000);
1380 	if (rv != 0)
1381 		aprint_error_dev(sc->sc_dev, "TABLE_CLEAR failed for tid %d group %d\n",
1382 		    tid, group);
1383 
1384 	iop_msg_unmap(sc, im);
1385 	iop_msg_free(sc, im);
1386 	return (rv);
1387 }
1388 
1389 /*
1390  * Add a single row to a tabular parameter group.  The row can have only one
1391  * field.
1392  */
1393 int
iop_table_add_row(struct iop_softc * sc,int tid,int group,void * buf,int size,int row)1394 iop_table_add_row(struct iop_softc *sc, int tid, int group, void *buf,
1395 		  int size, int row)
1396 {
1397 	struct iop_msg *im;
1398 	struct i2o_util_params_op *mf;
1399 	struct iop_pgop *pgop;
1400 	int rv, totsize;
1401 	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1402 
1403 	totsize = sizeof(*pgop) + sizeof(u_int16_t) * 2 + size;
1404 
1405 	im = iop_msg_alloc(sc, IM_WAIT);
1406 	if ((pgop = malloc(totsize, M_DEVBUF, M_WAITOK)) == NULL) {
1407 		iop_msg_free(sc, im);
1408 		return (ENOMEM);
1409 	}
1410 
1411 	mf = (struct i2o_util_params_op *)mb;
1412 	mf->msgflags = I2O_MSGFLAGS(i2o_util_params_op);
1413 	mf->msgfunc = I2O_MSGFUNC(tid, I2O_UTIL_PARAMS_SET);
1414 	mf->msgictx = IOP_ICTX;
1415 	mf->msgtctx = im->im_tctx;
1416 	mf->flags = 0;
1417 
1418 	pgop->olh.count = htole16(1);
1419 	pgop->olh.reserved = htole16(0);
1420 	pgop->oat.operation = htole16(I2O_PARAMS_OP_ROW_ADD);
1421 	pgop->oat.fieldcount = htole16(1);
1422 	pgop->oat.group = htole16(group);
1423 	pgop->oat.fields[0] = htole16(0);	/* FieldIdx */
1424 	pgop->oat.fields[1] = htole16(1);	/* RowCount */
1425 	pgop->oat.fields[2] = htole16(row);	/* KeyValue */
1426 	memcpy(&pgop->oat.fields[3], buf, size);
1427 
1428 	iop_msg_map(sc, im, mb, pgop, totsize, 1, NULL);
1429 	rv = iop_msg_post(sc, im, mb, 30000);
1430 	if (rv != 0)
1431 		aprint_error_dev(sc->sc_dev, "ADD_ROW failed for tid %d group %d row %d\n",
1432 		    tid, group, row);
1433 
1434 	iop_msg_unmap(sc, im);
1435 	iop_msg_free(sc, im);
1436 	free(pgop, M_DEVBUF);
1437 	return (rv);
1438 }
1439 
1440 /*
1441  * Execute a simple command (no parameters).
1442  */
1443 int
iop_simple_cmd(struct iop_softc * sc,int tid,int function,int ictx,int async,int timo)1444 iop_simple_cmd(struct iop_softc *sc, int tid, int function, int ictx,
1445 	       int async, int timo)
1446 {
1447 	struct iop_msg *im;
1448 	struct i2o_msg mf;
1449 	int rv, fl;
1450 
1451 	fl = (async != 0 ? IM_WAIT : IM_POLL);
1452 	im = iop_msg_alloc(sc, fl);
1453 
1454 	mf.msgflags = I2O_MSGFLAGS(i2o_msg);
1455 	mf.msgfunc = I2O_MSGFUNC(tid, function);
1456 	mf.msgictx = ictx;
1457 	mf.msgtctx = im->im_tctx;
1458 
1459 	rv = iop_msg_post(sc, im, &mf, timo);
1460 	iop_msg_free(sc, im);
1461 	return (rv);
1462 }
1463 
1464 /*
1465  * Post the system table to the IOP.
1466  */
1467 static int
iop_systab_set(struct iop_softc * sc)1468 iop_systab_set(struct iop_softc *sc)
1469 {
1470 	struct i2o_exec_sys_tab_set *mf;
1471 	struct iop_msg *im;
1472 	bus_space_handle_t bsh;
1473 	bus_addr_t boo;
1474 	u_int32_t mema[2], ioa[2];
1475 	int rv;
1476 	u_int32_t mb[IOP_MAX_MSG_SIZE / sizeof(u_int32_t)];
1477 
1478 	im = iop_msg_alloc(sc, IM_WAIT);
1479 
1480 	mf = (struct i2o_exec_sys_tab_set *)mb;
1481 	mf->msgflags = I2O_MSGFLAGS(i2o_exec_sys_tab_set);
1482 	mf->msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_SYS_TAB_SET);
1483 	mf->msgictx = IOP_ICTX;
1484 	mf->msgtctx = im->im_tctx;
1485 	mf->iopid = (device_unit(sc->sc_dev) + 2) << 12;
1486 	mf->segnumber = 0;
1487 
1488 	mema[1] = sc->sc_status.desiredprivmemsize;
1489 	ioa[1] = sc->sc_status.desiredpriviosize;
1490 
1491 	if (mema[1] != 0) {
1492 		rv = bus_space_alloc(sc->sc_bus_memt, 0, 0xffffffff,
1493 		    le32toh(mema[1]), PAGE_SIZE, 0, 0, &boo, &bsh);
1494 		mema[0] = htole32(boo);
1495 		if (rv != 0) {
1496 			aprint_error_dev(sc->sc_dev, "can't alloc priv mem space, err = %d\n", rv);
1497 			mema[0] = 0;
1498 			mema[1] = 0;
1499 		}
1500 	}
1501 
1502 	if (ioa[1] != 0) {
1503 		rv = bus_space_alloc(sc->sc_bus_iot, 0, 0xffff,
1504 		    le32toh(ioa[1]), 0, 0, 0, &boo, &bsh);
1505 		ioa[0] = htole32(boo);
1506 		if (rv != 0) {
1507 			aprint_error_dev(sc->sc_dev, "can't alloc priv i/o space, err = %d\n", rv);
1508 			ioa[0] = 0;
1509 			ioa[1] = 0;
1510 		}
1511 	}
1512 
1513 	iop_msg_map(sc, im, mb, iop_systab, iop_systab_size, 1, NULL);
1514 	iop_msg_map(sc, im, mb, mema, sizeof(mema), 1, NULL);
1515 	iop_msg_map(sc, im, mb, ioa, sizeof(ioa), 1, NULL);
1516 	rv = iop_msg_post(sc, im, mb, 5000);
1517 	iop_msg_unmap(sc, im);
1518 	iop_msg_free(sc, im);
1519 	return (rv);
1520 }
1521 
1522 /*
1523  * Reset the IOP.  Must be called with interrupts disabled.
1524  */
1525 static int
iop_reset(struct iop_softc * sc)1526 iop_reset(struct iop_softc *sc)
1527 {
1528 	u_int32_t mfa, *sw;
1529 	struct i2o_exec_iop_reset mf;
1530 	int rv;
1531 	paddr_t pa;
1532 
1533 	sw = (u_int32_t *)sc->sc_scr;
1534 	pa = sc->sc_scr_dmamap->dm_segs[0].ds_addr;
1535 
1536 	mf.msgflags = I2O_MSGFLAGS(i2o_exec_iop_reset);
1537 	mf.msgfunc = I2O_MSGFUNC(I2O_TID_IOP, I2O_EXEC_IOP_RESET);
1538 	mf.reserved[0] = 0;
1539 	mf.reserved[1] = 0;
1540 	mf.reserved[2] = 0;
1541 	mf.reserved[3] = 0;
1542 	mf.statuslow = (u_int32_t)pa;
1543 	mf.statushigh = (u_int32_t)((u_int64_t)pa >> 32);
1544 
1545 	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1546 	    BUS_DMASYNC_POSTWRITE);
1547 	*sw = htole32(0);
1548 	bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1549 	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
1550 
1551 	if ((rv = iop_post(sc, (u_int32_t *)&mf)))
1552 		return (rv);
1553 
1554 	POLL(2500,
1555 	    (bus_dmamap_sync(sc->sc_dmat, sc->sc_scr_dmamap, 0, sizeof(*sw),
1556 	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD), *sw != 0));
1557 	if (*sw != htole32(I2O_RESET_IN_PROGRESS)) {
1558 		aprint_error_dev(sc->sc_dev, "reset rejected, status 0x%x\n",
1559 		    le32toh(*sw));
1560 		return (EIO);
1561 	}
1562 
1563 	/*
1564 	 * IOP is now in the INIT state.  Wait no more than 10 seconds for
1565 	 * the inbound queue to become responsive.
1566 	 */
1567 	POLL(10000, (mfa = iop_inl(sc, IOP_REG_IFIFO)) != IOP_MFA_EMPTY);
1568 	if (mfa == IOP_MFA_EMPTY) {
1569 		aprint_error_dev(sc->sc_dev, "reset failed\n");
1570 		return (EIO);
1571 	}
1572 
1573 	iop_release_mfa(sc, mfa);
1574 	return (0);
1575 }
1576 
1577 /*
1578  * Register a new initiator.  Must be called with the configuration lock
1579  * held.
1580  */
1581 void
iop_initiator_register(struct iop_softc * sc,struct iop_initiator * ii)1582 iop_initiator_register(struct iop_softc *sc, struct iop_initiator *ii)
1583 {
1584 	static int ictxgen;
1585 
1586 	/* 0 is reserved (by us) for system messages. */
1587 	ii->ii_ictx = ++ictxgen;
1588 
1589 	/*
1590 	 * `Utility initiators' don't make it onto the per-IOP initiator list
1591 	 * (which is used only for configuration), but do get one slot on
1592 	 * the inbound queue.
1593 	 */
1594 	if ((ii->ii_flags & II_UTILITY) == 0) {
1595 		LIST_INSERT_HEAD(&sc->sc_iilist, ii, ii_list);
1596 		sc->sc_nii++;
1597 	} else
1598 		sc->sc_nuii++;
1599 
1600 	cv_init(&ii->ii_cv, "iopevt");
1601 
1602 	mutex_spin_enter(&sc->sc_intrlock);
1603 	LIST_INSERT_HEAD(IOP_ICTXHASH(ii->ii_ictx), ii, ii_hash);
1604 	mutex_spin_exit(&sc->sc_intrlock);
1605 }
1606 
1607 /*
1608  * Unregister an initiator.  Must be called with the configuration lock
1609  * held.
1610  */
1611 void
iop_initiator_unregister(struct iop_softc * sc,struct iop_initiator * ii)1612 iop_initiator_unregister(struct iop_softc *sc, struct iop_initiator *ii)
1613 {
1614 
1615 	if ((ii->ii_flags & II_UTILITY) == 0) {
1616 		LIST_REMOVE(ii, ii_list);
1617 		sc->sc_nii--;
1618 	} else
1619 		sc->sc_nuii--;
1620 
1621 	mutex_spin_enter(&sc->sc_intrlock);
1622 	LIST_REMOVE(ii, ii_hash);
1623 	mutex_spin_exit(&sc->sc_intrlock);
1624 
1625 	cv_destroy(&ii->ii_cv);
1626 }
1627 
1628 /*
1629  * Handle a reply frame from the IOP.
1630  */
1631 static int
iop_handle_reply(struct iop_softc * sc,u_int32_t rmfa)1632 iop_handle_reply(struct iop_softc *sc, u_int32_t rmfa)
1633 {
1634 	struct iop_msg *im;
1635 	struct i2o_reply *rb;
1636 	struct i2o_fault_notify *fn;
1637 	struct iop_initiator *ii;
1638 	u_int off, ictx, tctx, status, size;
1639 
1640 	KASSERT(mutex_owned(&sc->sc_intrlock));
1641 
1642 	off = (int)(rmfa - sc->sc_rep_phys);
1643 	rb = (struct i2o_reply *)((char *)sc->sc_rep + off);
1644 
1645 	/* Perform reply queue DMA synchronisation. */
1646 	bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, off,
1647 	    sc->sc_framesize, BUS_DMASYNC_POSTREAD);
1648 
1649 #ifdef I2ODEBUG
1650 	if ((le32toh(rb->msgflags) & I2O_MSGFLAGS_64BIT) != 0)
1651 		panic("iop_handle_reply: 64-bit reply");
1652 #endif
1653 	/*
1654 	 * Find the initiator.
1655 	 */
1656 	ictx = le32toh(rb->msgictx);
1657 	if (ictx == IOP_ICTX)
1658 		ii = NULL;
1659 	else {
1660 		ii = LIST_FIRST(IOP_ICTXHASH(ictx));
1661 		for (; ii != NULL; ii = LIST_NEXT(ii, ii_hash))
1662 			if (ii->ii_ictx == ictx)
1663 				break;
1664 		if (ii == NULL) {
1665 #ifdef I2ODEBUG
1666 			iop_reply_print(sc, rb);
1667 #endif
1668 			aprint_error_dev(sc->sc_dev, "WARNING: bad ictx returned (%x)\n",
1669 			    ictx);
1670 			return (-1);
1671 		}
1672 	}
1673 
1674 	/*
1675 	 * If we received a transport failure notice, we've got to dig the
1676 	 * transaction context (if any) out of the original message frame,
1677 	 * and then release the original MFA back to the inbound FIFO.
1678 	 */
1679 	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0) {
1680 		status = I2O_STATUS_SUCCESS;
1681 
1682 		fn = (struct i2o_fault_notify *)rb;
1683 		tctx = iop_inl_msg(sc, fn->lowmfa + 12);
1684 		iop_release_mfa(sc, fn->lowmfa);
1685 		iop_tfn_print(sc, fn);
1686 	} else {
1687 		status = rb->reqstatus;
1688 		tctx = le32toh(rb->msgtctx);
1689 	}
1690 
1691 	if (ii == NULL || (ii->ii_flags & II_NOTCTX) == 0) {
1692 		/*
1693 		 * This initiator tracks state using message wrappers.
1694 		 *
1695 		 * Find the originating message wrapper, and if requested
1696 		 * notify the initiator.
1697 		 */
1698 		im = sc->sc_ims + (tctx & IOP_TCTX_MASK);
1699 		if ((tctx & IOP_TCTX_MASK) > sc->sc_maxib ||
1700 		    (im->im_flags & IM_ALLOCED) == 0 ||
1701 		    tctx != im->im_tctx) {
1702 			aprint_error_dev(sc->sc_dev, "WARNING: bad tctx returned (0x%08x, %p)\n", tctx, im);
1703 			if (im != NULL)
1704 				aprint_error_dev(sc->sc_dev, "flags=0x%08x tctx=0x%08x\n",
1705 				    im->im_flags, im->im_tctx);
1706 #ifdef I2ODEBUG
1707 			if ((rb->msgflags & I2O_MSGFLAGS_FAIL) == 0)
1708 				iop_reply_print(sc, rb);
1709 #endif
1710 			return (-1);
1711 		}
1712 
1713 		if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
1714 			im->im_flags |= IM_FAIL;
1715 
1716 #ifdef I2ODEBUG
1717 		if ((im->im_flags & IM_REPLIED) != 0)
1718 			panic("%s: dup reply", device_xname(sc->sc_dev));
1719 #endif
1720 		im->im_flags |= IM_REPLIED;
1721 
1722 #ifdef I2ODEBUG
1723 		if (status != I2O_STATUS_SUCCESS)
1724 			iop_reply_print(sc, rb);
1725 #endif
1726 		im->im_reqstatus = status;
1727 		im->im_detstatus = le16toh(rb->detail);
1728 
1729 		/* Copy the reply frame, if requested. */
1730 		if (im->im_rb != NULL) {
1731 			size = (le32toh(rb->msgflags) >> 14) & ~3;
1732 #ifdef I2ODEBUG
1733 			if (size > sc->sc_framesize)
1734 				panic("iop_handle_reply: reply too large");
1735 #endif
1736 			memcpy(im->im_rb, rb, size);
1737 		}
1738 
1739 		/* Notify the initiator. */
1740 		if ((im->im_flags & IM_WAIT) != 0)
1741 			cv_broadcast(&im->im_cv);
1742 		else if ((im->im_flags & (IM_POLL | IM_POLL_INTR)) != IM_POLL) {
1743 			if (ii != NULL) {
1744 				mutex_spin_exit(&sc->sc_intrlock);
1745 				(*ii->ii_intr)(ii->ii_dv, im, rb);
1746 				mutex_spin_enter(&sc->sc_intrlock);
1747 			}
1748 		}
1749 	} else {
1750 		/*
1751 		 * This initiator discards message wrappers.
1752 		 *
1753 		 * Simply pass the reply frame to the initiator.
1754 		 */
1755 		if (ii != NULL) {
1756 			mutex_spin_exit(&sc->sc_intrlock);
1757 			(*ii->ii_intr)(ii->ii_dv, NULL, rb);
1758 			mutex_spin_enter(&sc->sc_intrlock);
1759 		}
1760 	}
1761 
1762 	return (status);
1763 }
1764 
1765 /*
1766  * Handle an interrupt from the IOP.
1767  */
1768 int
iop_intr(void * arg)1769 iop_intr(void *arg)
1770 {
1771 	struct iop_softc *sc;
1772 	u_int32_t rmfa;
1773 
1774 	sc = arg;
1775 
1776 	mutex_spin_enter(&sc->sc_intrlock);
1777 
1778 	if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) == 0) {
1779 		mutex_spin_exit(&sc->sc_intrlock);
1780 		return (0);
1781 	}
1782 
1783 	for (;;) {
1784 		/* Double read to account for IOP bug. */
1785 		if ((rmfa = iop_inl(sc, IOP_REG_OFIFO)) == IOP_MFA_EMPTY) {
1786 			rmfa = iop_inl(sc, IOP_REG_OFIFO);
1787 			if (rmfa == IOP_MFA_EMPTY)
1788 				break;
1789 		}
1790 		iop_handle_reply(sc, rmfa);
1791 		iop_outl(sc, IOP_REG_OFIFO, rmfa);
1792 	}
1793 
1794 	mutex_spin_exit(&sc->sc_intrlock);
1795 	return (1);
1796 }
1797 
1798 /*
1799  * Handle an event signalled by the executive.
1800  */
1801 static void
iop_intr_event(device_t dv,struct iop_msg * im,void * reply)1802 iop_intr_event(device_t dv, struct iop_msg *im, void *reply)
1803 {
1804 	struct i2o_util_event_register_reply *rb;
1805 	u_int event;
1806 
1807 	rb = reply;
1808 
1809 	if ((rb->msgflags & I2O_MSGFLAGS_FAIL) != 0)
1810 		return;
1811 
1812 	event = le32toh(rb->event);
1813 	printf("%s: event 0x%08x received\n", device_xname(dv), event);
1814 }
1815 
1816 /*
1817  * Allocate a message wrapper.
1818  */
1819 struct iop_msg *
1820 iop_msg_alloc(struct iop_softc *sc, int flags)
1821 {
1822 	struct iop_msg *im;
1823 	static u_int tctxgen;
1824 	int i;
1825 
1826 #ifdef I2ODEBUG
1827 	if ((flags & IM_SYSMASK) != 0)
1828 		panic("iop_msg_alloc: system flags specified");
1829 #endif
1830 
1831 	mutex_spin_enter(&sc->sc_intrlock);
1832 	im = SLIST_FIRST(&sc->sc_im_freelist);
1833 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
1834 	if (im == NULL)
1835 		panic("iop_msg_alloc: no free wrappers");
1836 #endif
1837 	SLIST_REMOVE_HEAD(&sc->sc_im_freelist, im_chain);
1838 	mutex_spin_exit(&sc->sc_intrlock);
1839 
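	/*
	 * im_tctx is split: the low IOP_TCTX_SHIFT bits index this wrapper
	 * within sc_ims, and the upper bits carry a per-allocation
	 * generation number, so stale or duplicate replies are caught in
	 * iop_handle_reply().
	 */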
1840 	im->im_tctx = (im->im_tctx & IOP_TCTX_MASK) | tctxgen;
1841 	tctxgen += (1 << IOP_TCTX_SHIFT);
1842 	im->im_flags = flags | IM_ALLOCED;
1843 	im->im_rb = NULL;
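	/* Mark every transfer slot free; ix_size == 0 means unused. */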
1844 	i = 0;
1845 	do {
1846 		im->im_xfer[i++].ix_size = 0;
1847 	} while (i < IOP_MAX_MSG_XFERS);
1848 
1849 	return (im);
1850 }
1851 
1852 /*
1853  * Free a message wrapper.
1854  */
1855 void
1856 iop_msg_free(struct iop_softc *sc, struct iop_msg *im)
1857 {
1858 
1859 #ifdef I2ODEBUG
1860 	if ((im->im_flags & IM_ALLOCED) == 0)
1861 		panic("iop_msg_free: wrapper not allocated");
1862 #endif
1863 
1864 	im->im_flags = 0;
1865 	mutex_spin_enter(&sc->sc_intrlock);
1866 	SLIST_INSERT_HEAD(&sc->sc_im_freelist, im, im_chain);
1867 	mutex_spin_exit(&sc->sc_intrlock);
1868 }
1869 
1870 /*
1871  * Map a data transfer.  Write a scatter-gather list into the message frame.
1872  */
1873 int
1874 iop_msg_map(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
1875 	    void *xferaddr, int xfersize, int out, struct proc *up)
1876 {
1877 	bus_dmamap_t dm;
1878 	bus_dma_segment_t *ds;
1879 	struct iop_xfer *ix;
1880 	u_int rv, i, nsegs, flg, off, xn;
1881 	u_int32_t *p;
1882 
1883 	for (xn = 0, ix = im->im_xfer; xn < IOP_MAX_MSG_XFERS; xn++, ix++)
1884 		if (ix->ix_size == 0)
1885 			break;
1886 
1887 #ifdef I2ODEBUG
1888 	if (xfersize == 0)
1889 		panic("iop_msg_map: null transfer");
1890 	if (xfersize > IOP_MAX_XFER)
1891 		panic("iop_msg_map: transfer too large");
1892 	if (xn == IOP_MAX_MSG_XFERS)
1893 		panic("iop_msg_map: too many xfers");
1894 #endif
1895 
1896 	/*
1897 	 * Only the first DMA map is static.
1898 	 */
1899 	if (xn != 0) {
1900 		rv = bus_dmamap_create(sc->sc_dmat, IOP_MAX_XFER,
1901 		    IOP_MAX_SEGS, IOP_MAX_XFER, 0,
1902 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ix->ix_map);
1903 		if (rv != 0)
1904 			return (rv);
1905 	}
1906 
1907 	dm = ix->ix_map;
1908 	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, up,
1909 	    (up == NULL ? BUS_DMA_NOWAIT : 0));
1910 	if (rv != 0)
1911 		goto bad;
1912 
1913 	/*
1914 	 * How many SIMPLE SG elements can we fit in this message?
1915 	 */
1916 	off = mb[0] >> 16;
1917 	p = mb + off;
1918 	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
1919 
1920 	if (dm->dm_nsegs > nsegs) {
1921 		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
1922 		rv = EFBIG;
1923 		DPRINTF(("iop_msg_map: too many segs\n"));
1924 		goto bad;
1925 	}
1926 
1927 	nsegs = dm->dm_nsegs;
1928 	xfersize = 0;
1929 
1930 	/*
1931 	 * Write out the SG list.
1932 	 */
1933 	if (out)
1934 		flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
1935 	else
1936 		flg = I2O_SGL_SIMPLE;
1937 
1938 	for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
1939 		p[0] = (u_int32_t)ds->ds_len | flg;
1940 		p[1] = (u_int32_t)ds->ds_addr;
1941 		xfersize += ds->ds_len;
1942 	}
1943 
1944 	p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER;
1945 	p[1] = (u_int32_t)ds->ds_addr;
1946 	xfersize += ds->ds_len;
1947 
1948 	/* Fix up the transfer record, and sync the map. */
1949 	ix->ix_flags = (out ? IX_OUT : IX_IN);
1950 	ix->ix_size = xfersize;
1951 	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
1952 	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
1953 
1954 	/*
1955 	 * If this is the first xfer we've mapped for this message, adjust
1956 	 * the SGL offset field in the message header.
1957 	 */
1958 	if ((im->im_flags & IM_SGLOFFADJ) == 0) {
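		/*
		 * The current message size (32-bit words, bits 16..31 of
		 * mb[0]) becomes the SGL offset; ">> 12 & 0xf0" places it
		 * in bits 4..7 of the header's VersionOffset byte.
		 */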
1959 		mb[0] += (mb[0] >> 12) & 0xf0;
1960 		im->im_flags |= IM_SGLOFFADJ;
1961 	}
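	/* Each SIMPLE element is two words: grow the size field by 2 * nsegs. */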
1962 	mb[0] += (nsegs << 17);
1963 	return (0);
1964 
1965  bad:
1966 	if (xn != 0)
1967 		bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
1968 	return (rv);
1969 }
1970 
1971 /*
1972  * Map a block I/O data transfer (different in that there's only one per
1973  * message maximum, and PAGE addressing may be used).  Write a scatter
1974  * gather list into the message frame.
1975  */
1976 int
1977 iop_msg_map_bio(struct iop_softc *sc, struct iop_msg *im, u_int32_t *mb,
1978 		void *xferaddr, int xfersize, int out)
1979 {
1980 	bus_dma_segment_t *ds;
1981 	bus_dmamap_t dm;
1982 	struct iop_xfer *ix;
1983 	u_int rv, i, nsegs, off, slen, tlen, flg;
1984 	paddr_t saddr, eaddr;
1985 	u_int32_t *p;
1986 
1987 #ifdef I2ODEBUG
1988 	if (xfersize == 0)
1989 		panic("iop_msg_map_bio: null transfer");
1990 	if (xfersize > IOP_MAX_XFER)
1991 		panic("iop_msg_map_bio: transfer too large");
1992 	if ((im->im_flags & IM_SGLOFFADJ) != 0)
1993 		panic("iop_msg_map_bio: SGLOFFADJ");
1994 #endif
1995 
1996 	ix = im->im_xfer;
1997 	dm = ix->ix_map;
1998 	rv = bus_dmamap_load(sc->sc_dmat, dm, xferaddr, xfersize, NULL,
1999 	    BUS_DMA_NOWAIT | BUS_DMA_STREAMING);
2000 	if (rv != 0)
2001 		return (rv);
2002 
2003 	off = mb[0] >> 16;
2004 	nsegs = ((sc->sc_framesize >> 2) - off) >> 1;
2005 
2006 	/*
2007 	 * If the transfer is highly fragmented and won't fit using SIMPLE
2008 	 * elements, use PAGE_LIST elements instead.  SIMPLE elements are
2009 	 * potentially more efficient, both for us and the IOP.
2010 	 */
2011 	if (dm->dm_nsegs > nsegs) {
2012 		nsegs = 1;
2013 		p = mb + off + 1;
2014 
2015 		/* XXX This should be done with a bus_space flag. */
2016 		for (i = dm->dm_nsegs, ds = dm->dm_segs; i > 0; i--, ds++) {
2017 			slen = ds->ds_len;
2018 			saddr = ds->ds_addr;
2019 
2020 			while (slen > 0) {
2021 				eaddr = (saddr + PAGE_SIZE) & ~(PAGE_SIZE - 1);
2022 				tlen = min(eaddr - saddr, slen);
2023 				slen -= tlen;
2024 				*p++ = le32toh(saddr);
2025 				saddr = eaddr;
2026 				nsegs++;
2027 			}
2028 		}
2029 
2030 		mb[off] = xfersize | I2O_SGL_PAGE_LIST | I2O_SGL_END_BUFFER |
2031 		    I2O_SGL_END;
2032 		if (out)
2033 			mb[off] |= I2O_SGL_DATA_OUT;
2034 	} else {
2035 		p = mb + off;
2036 		nsegs = dm->dm_nsegs;
2037 
2038 		if (out)
2039 			flg = I2O_SGL_SIMPLE | I2O_SGL_DATA_OUT;
2040 		else
2041 			flg = I2O_SGL_SIMPLE;
2042 
2043 		for (i = nsegs, ds = dm->dm_segs; i > 1; i--, p += 2, ds++) {
2044 			p[0] = (u_int32_t)ds->ds_len | flg;
2045 			p[1] = (u_int32_t)ds->ds_addr;
2046 		}
2047 
2048 		p[0] = (u_int32_t)ds->ds_len | flg | I2O_SGL_END_BUFFER |
2049 		    I2O_SGL_END;
2050 		p[1] = (u_int32_t)ds->ds_addr;
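		/* Convert the SIMPLE element count to 32-bit words. */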
2051 		nsegs <<= 1;
2052 	}
2053 
2054 	/* Fix up the transfer record, and sync the map. */
2055 	ix->ix_flags = (out ? IX_OUT : IX_IN);
2056 	ix->ix_size = xfersize;
2057 	bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, xfersize,
2058 	    out ? BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);
2059 
2060 	/*
2061 	 * Adjust the SGL offset and total message size fields.  We don't
2062 	 * set IM_SGLOFFADJ, since it's used only for SIMPLE elements.
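	 * Both off and nsegs are in 32-bit words here: "off << 4" lands in
	 * the SGL offset nibble, "nsegs << 16" extends the size field.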
2063 	 */
2064 	mb[0] += ((off << 4) + (nsegs << 16));
2065 	return (0);
2066 }
2067 
2068 /*
2069  * Unmap all data transfers associated with a message wrapper.
2070  */
2071 void
2072 iop_msg_unmap(struct iop_softc *sc, struct iop_msg *im)
2073 {
2074 	struct iop_xfer *ix;
2075 	int i;
2076 
2077 #ifdef I2ODEBUG
2078 	if (im->im_xfer[0].ix_size == 0)
2079 		panic("iop_msg_unmap: no transfers mapped");
2080 #endif
2081 
2082 	for (ix = im->im_xfer, i = 0;;) {
2083 		bus_dmamap_sync(sc->sc_dmat, ix->ix_map, 0, ix->ix_size,
2084 		    ix->ix_flags & IX_OUT ? BUS_DMASYNC_POSTWRITE :
2085 		    BUS_DMASYNC_POSTREAD);
2086 		bus_dmamap_unload(sc->sc_dmat, ix->ix_map);
2087 
2088 		/* Only the first DMA map is static. */
2089 		if (i != 0)
2090 			bus_dmamap_destroy(sc->sc_dmat, ix->ix_map);
2091 		if (++i >= IOP_MAX_MSG_XFERS)
2092 			break;
2093 		if ((++ix)->ix_size == 0)
2094 			break;
2095 	}
2096 }
2097 
2098 /*
2099  * Post a message frame to the IOP's inbound queue.
2100  */
2101 int
2102 iop_post(struct iop_softc *sc, u_int32_t *mb)
2103 {
2104 	u_int32_t mfa;
2105 
2106 #ifdef I2ODEBUG
2107 	if ((mb[0] >> 16) > (sc->sc_framesize >> 2))
2108 		panic("iop_post: frame too large");
2109 #endif
2110 
2111 	mutex_spin_enter(&sc->sc_intrlock);
2112 
2113 	/* Allocate a slot with the IOP (double read: IOP bug workaround). */
2114 	if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY)
2115 		if ((mfa = iop_inl(sc, IOP_REG_IFIFO)) == IOP_MFA_EMPTY) {
2116 			mutex_spin_exit(&sc->sc_intrlock);
2117 			aprint_error_dev(sc->sc_dev, "mfa not forthcoming\n");
2118 			return (EAGAIN);
2119 		}
2120 
2121 	/* Perform reply buffer DMA synchronisation. */
2122 	if (sc->sc_rep_size != 0) {
2123 		bus_dmamap_sync(sc->sc_dmat, sc->sc_rep_dmamap, 0,
2124 		    sc->sc_rep_size, BUS_DMASYNC_PREREAD);
2125 	}
2126 
2127 	/* Copy out the message frame. */
2128 	bus_space_write_region_4(sc->sc_msg_iot, sc->sc_msg_ioh, mfa, mb,
2129 	    mb[0] >> 16);
2130 	bus_space_barrier(sc->sc_msg_iot, sc->sc_msg_ioh, mfa,
2131 	    (mb[0] >> 14) & ~3, BUS_SPACE_BARRIER_WRITE);
2132 
2133 	/* Post the MFA back to the IOP. */
2134 	iop_outl(sc, IOP_REG_IFIFO, mfa);
2135 
2136 	mutex_spin_exit(&sc->sc_intrlock);
2137 	return (0);
2138 }
2139 
2140 /*
2141  * Post a message to the IOP and deal with completion.
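 *
 * A typical initiator sequence (a sketch, not the only valid pattern):
 * iop_msg_alloc(), build the frame, iop_msg_map() for each buffer,
 * iop_msg_post() with IM_WAIT or IM_POLL set, then iop_msg_unmap()
 * and iop_msg_free().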
2142  */
2143 int
2144 iop_msg_post(struct iop_softc *sc, struct iop_msg *im, void *xmb, int timo)
2145 {
2146 	u_int32_t *mb;
2147 	int rv;
2148 
2149 	mb = xmb;
2150 
2151 	/* Terminate the scatter/gather list chain. */
2152 	if ((im->im_flags & IM_SGLOFFADJ) != 0)
2153 		mb[(mb[0] >> 16) - 2] |= I2O_SGL_END;
2154 
2155 	if ((rv = iop_post(sc, mb)) != 0)
2156 		return (rv);
2157 
2158 	if ((im->im_flags & (IM_POLL | IM_WAIT)) != 0) {
2159 		if ((im->im_flags & IM_POLL) != 0)
2160 			iop_msg_poll(sc, im, timo);
2161 		else
2162 			iop_msg_wait(sc, im, timo);
2163 
2164 		mutex_spin_enter(&sc->sc_intrlock);
2165 		if ((im->im_flags & IM_REPLIED) != 0) {
2166 			if ((im->im_flags & IM_NOSTATUS) != 0)
2167 				rv = 0;
2168 			else if ((im->im_flags & IM_FAIL) != 0)
2169 				rv = ENXIO;
2170 			else if (im->im_reqstatus != I2O_STATUS_SUCCESS)
2171 				rv = EIO;
2172 			else
2173 				rv = 0;
2174 		} else
2175 			rv = EBUSY;
2176 		mutex_spin_exit(&sc->sc_intrlock);
2177 	} else
2178 		rv = 0;
2179 
2180 	return (rv);
2181 }
2182 
2183 /*
2184  * Spin until the specified message is replied to.
2185  */
2186 static void
2187 iop_msg_poll(struct iop_softc *sc, struct iop_msg *im, int timo)
2188 {
2189 	u_int32_t rmfa;
2190 
2191 	mutex_spin_enter(&sc->sc_intrlock);
2192 
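	/* timo is in milliseconds; each iteration below delays 100us. */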
2193 	for (timo *= 10; timo != 0; timo--) {
2194 		if ((iop_inl(sc, IOP_REG_INTR_STATUS) & IOP_INTR_OFIFO) != 0) {
2195 			/* Double read to account for IOP bug. */
2196 			rmfa = iop_inl(sc, IOP_REG_OFIFO);
2197 			if (rmfa == IOP_MFA_EMPTY)
2198 				rmfa = iop_inl(sc, IOP_REG_OFIFO);
2199 			if (rmfa != IOP_MFA_EMPTY) {
2200 				iop_handle_reply(sc, rmfa);
2201 
2202 				/*
2203 				 * Return the reply frame to the IOP's
2204 				 * outbound FIFO.
2205 				 */
2206 				iop_outl(sc, IOP_REG_OFIFO, rmfa);
2207 			}
2208 		}
2209 		if ((im->im_flags & IM_REPLIED) != 0)
2210 			break;
2211 		mutex_spin_exit(&sc->sc_intrlock);
2212 		DELAY(100);
2213 		mutex_spin_enter(&sc->sc_intrlock);
2214 	}
2215 
2216 	if (timo == 0) {
2217 #ifdef I2ODEBUG
2218 		printf("%s: poll - no reply\n", device_xname(sc->sc_dev));
2219 		if (iop_status_get(sc, 1) != 0)
2220 			printf("iop_msg_poll: unable to retrieve status\n");
2221 		else
2222 			printf("iop_msg_poll: IOP state = %d\n",
2223 			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2224 #endif
2225 	}
2226 
2227 	mutex_spin_exit(&sc->sc_intrlock);
2228 }
2229 
2230 /*
2231  * Sleep until the specified message is replied to.
2232  */
2233 static void
2234 iop_msg_wait(struct iop_softc *sc, struct iop_msg *im, int timo)
2235 {
2236 	int rv;
2237 
2238 	mutex_spin_enter(&sc->sc_intrlock);
2239 	if ((im->im_flags & IM_REPLIED) != 0) {
2240 		mutex_spin_exit(&sc->sc_intrlock);
2241 		return;
2242 	}
2243 	rv = cv_timedwait(&im->im_cv, &sc->sc_intrlock, mstohz(timo));
2244 	mutex_spin_exit(&sc->sc_intrlock);
2245 
2246 #ifdef I2ODEBUG
2247 	if (rv != 0) {
2248 		printf("iop_msg_wait: cv_timedwait() == %d\n", rv);
2249 		if (iop_status_get(sc, 0) != 0)
2250 			printf("%s: unable to retrieve status\n", __func__);
2251 		else
2252 			printf("%s: IOP state = %d\n", __func__,
2253 			    (le32toh(sc->sc_status.segnumber) >> 16) & 0xff);
2254 	}
2255 #else
2256 	__USE(rv);
2257 #endif
2258 }
2259 
2260 /*
2261  * Release an unused message frame back to the IOP's inbound fifo.
2262  */
2263 static void
2264 iop_release_mfa(struct iop_softc *sc, u_int32_t mfa)
2265 {
2266 
2267 	/* Use the frame to issue a no-op. */
2268 	iop_outl_msg(sc, mfa, I2O_VERSION_11 | (4 << 16));
2269 	iop_outl_msg(sc, mfa + 4, I2O_MSGFUNC(I2O_TID_IOP, I2O_UTIL_NOP));
2270 	iop_outl_msg(sc, mfa + 8, 0);
2271 	iop_outl_msg(sc, mfa + 12, 0);
2272 
2273 	iop_outl(sc, IOP_REG_IFIFO, mfa);
2274 }
2275 
2276 #ifdef I2ODEBUG
2277 /*
2278  * Dump a reply frame header.
2279  */
2280 static void
2281 iop_reply_print(struct iop_softc *sc, struct i2o_reply *rb)
2282 {
2283 	u_int function, detail;
2284 	const char *statusstr;
2285 
2286 	function = (le32toh(rb->msgfunc) >> 24) & 0xff;
2287 	detail = le16toh(rb->detail);
2288 
2289 	printf("%s: reply:\n", device_xname(sc->sc_dev));
2290 
2291 	if (rb->reqstatus < sizeof(iop_status) / sizeof(iop_status[0]))
2292 		statusstr = iop_status[rb->reqstatus];
2293 	else
2294 		statusstr = "undefined error code";
2295 
2296 	printf("%s:   function=0x%02x status=0x%02x (%s)\n",
2297 	    device_xname(sc->sc_dev), function, rb->reqstatus, statusstr);
2298 	printf("%s:   detail=0x%04x ictx=0x%08x tctx=0x%08x\n",
2299 	    device_xname(sc->sc_dev), detail, le32toh(rb->msgictx),
2300 	    le32toh(rb->msgtctx));
2301 	printf("%s:   tidi=%d tidt=%d flags=0x%02x\n", device_xname(sc->sc_dev),
2302 	    (le32toh(rb->msgfunc) >> 12) & 4095, le32toh(rb->msgfunc) & 4095,
2303 	    (le32toh(rb->msgflags) >> 8) & 0xff);
2304 }
2305 #endif
2306 
2307 /*
2308  * Dump a transport failure reply.
2309  */
2310 static void
2311 iop_tfn_print(struct iop_softc *sc, struct i2o_fault_notify *fn)
2312 {
2313 
2314 	printf("%s: WARNING: transport failure:\n", device_xname(sc->sc_dev));
2315 
2316 	printf("%s:  ictx=0x%08x tctx=0x%08x\n", device_xname(sc->sc_dev),
2317 	    le32toh(fn->msgictx), le32toh(fn->msgtctx));
2318 	printf("%s:  failurecode=0x%02x severity=0x%02x\n",
2319 	    device_xname(sc->sc_dev), fn->failurecode, fn->severity);
2320 	printf("%s:  highestver=0x%02x lowestver=0x%02x\n",
2321 	    device_xname(sc->sc_dev), fn->highestver, fn->lowestver);
2322 }
2323 
2324 /*
2325  * Translate an I2O ASCII field into a C string.
2326  */
2327 void
2328 iop_strvis(struct iop_softc *sc, const char *src, int slen, char *dst, int dlen)
2329 {
2330 	int hc, lc, i, nit;
2331 
2332 	dlen--;
2333 	lc = 0;
2334 	hc = 0;
2335 	i = 0;
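	/*
	 * hc is set once a printable character has been copied, which
	 * suppresses leading padding; lc tracks the index just past the
	 * last printable character, so trailing junk is cut off by the
	 * final NUL.
	 */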
2336 
2337 	/*
2338 	 * DPT uses NUL as a space, whereas AMI uses it as a terminator.  The
2339 	 * spec has nothing to say about it.  Since AMI fields are usually
2340 	 * filled with junk after the terminator, stop at the first NUL
	 * unless the IOP is from DPT.
2341 	 */
2342 	nit = (le16toh(sc->sc_status.orgid) != I2O_ORG_DPT);
2343 
2344 	while (slen-- != 0 && dlen-- != 0) {
2345 		if (nit && *src == '\0')
2346 			break;
2347 		else if (*src <= 0x20 || *src >= 0x7f) {
2348 			if (hc)
2349 				dst[i++] = ' ';
2350 		} else {
2351 			hc = 1;
2352 			dst[i++] = *src;
2353 			lc = i;
2354 		}
2355 		src++;
2356 	}
2357 
2358 	dst[lc] = '\0';
2359 }
2360 
2361 /*
2362  * Retrieve the DEVICE_IDENTITY parameter group from the target and dump it.
2363  */
2364 int
2365 iop_print_ident(struct iop_softc *sc, int tid)
2366 {
2367 	struct {
2368 		struct	i2o_param_op_results pr;
2369 		struct	i2o_param_read_results prr;
2370 		struct	i2o_param_device_identity di;
2371 	} __packed p;
2372 	char buf[32];
2373 	int rv;
2374 
2375 	rv = iop_field_get_all(sc, tid, I2O_PARAM_DEVICE_IDENTITY, &p,
2376 	    sizeof(p), NULL);
2377 	if (rv != 0)
2378 		return (rv);
2379 
2380 	iop_strvis(sc, p.di.vendorinfo, sizeof(p.di.vendorinfo), buf,
2381 	    sizeof(buf));
2382 	printf(" <%s, ", buf);
2383 	iop_strvis(sc, p.di.productinfo, sizeof(p.di.productinfo), buf,
2384 	    sizeof(buf));
2385 	printf("%s, ", buf);
2386 	iop_strvis(sc, p.di.revlevel, sizeof(p.di.revlevel), buf, sizeof(buf));
2387 	printf("%s>", buf);
2388 
2389 	return (0);
2390 }
2391 
2392 /*
2393  * Claim or unclaim the specified TID.
2394  */
2395 int
2396 iop_util_claim(struct iop_softc *sc, struct iop_initiator *ii, int release,
2397 	       int flags)
2398 {
2399 	struct iop_msg *im;
2400 	struct i2o_util_claim mf;
2401 	int rv, func;
2402 
2403 	func = release ? I2O_UTIL_CLAIM_RELEASE : I2O_UTIL_CLAIM;
2404 	im = iop_msg_alloc(sc, IM_WAIT);
2405 
2406 	/* We can use the same structure, as they're identical. */
2407 	mf.msgflags = I2O_MSGFLAGS(i2o_util_claim);
2408 	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, func);
2409 	mf.msgictx = ii->ii_ictx;
2410 	mf.msgtctx = im->im_tctx;
2411 	mf.flags = flags;
2412 
2413 	rv = iop_msg_post(sc, im, &mf, 5000);
2414 	iop_msg_free(sc, im);
2415 	return (rv);
2416 }
2417 
2418 /*
2419  * Perform an abort.
2420  */
2421 int iop_util_abort(struct iop_softc *sc, struct iop_initiator *ii, int func,
2422 		   int tctxabort, int flags)
2423 {
2424 	struct iop_msg *im;
2425 	struct i2o_util_abort mf;
2426 	int rv;
2427 
2428 	im = iop_msg_alloc(sc, IM_WAIT);
2429 
2430 	mf.msgflags = I2O_MSGFLAGS(i2o_util_abort);
2431 	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_ABORT);
2432 	mf.msgictx = ii->ii_ictx;
2433 	mf.msgtctx = im->im_tctx;
2434 	mf.flags = (func << 24) | flags;
2435 	mf.tctxabort = tctxabort;
2436 
2437 	rv = iop_msg_post(sc, im, &mf, 5000);
2438 	iop_msg_free(sc, im);
2439 	return (rv);
2440 }
2441 
2442 /*
2443  * Enable or disable reception of events for the specified device.
2444  */
2445 int iop_util_eventreg(struct iop_softc *sc, struct iop_initiator *ii, int mask)
2446 {
2447 	struct i2o_util_event_register mf;
2448 
2449 	mf.msgflags = I2O_MSGFLAGS(i2o_util_event_register);
2450 	mf.msgfunc = I2O_MSGFUNC(ii->ii_tid, I2O_UTIL_EVENT_REGISTER);
2451 	mf.msgictx = ii->ii_ictx;
2452 	mf.msgtctx = 0;
2453 	mf.eventmask = mask;
2454 
2455 	/* This message is replied to only when events are signalled. */
2456 	return (iop_post(sc, (u_int32_t *)&mf));
2457 }
2458 
2459 int
2460 iopopen(dev_t dev, int flag, int mode, struct lwp *l)
2461 {
2462 	struct iop_softc *sc;
2463 
2464 	if ((sc = device_lookup_private(&iop_cd, minor(dev))) == NULL)
2465 		return (ENXIO);
2466 	if ((sc->sc_flags & IOP_ONLINE) == 0)
2467 		return (ENXIO);
2468 	if ((sc->sc_flags & IOP_OPEN) != 0)
2469 		return (EBUSY);
2470 	sc->sc_flags |= IOP_OPEN;
2471 
2472 	return (0);
2473 }
2474 
2475 int
2476 iopclose(dev_t dev, int flag, int mode,
2477     struct lwp *l)
2478 {
2479 	struct iop_softc *sc;
2480 
2481 	sc = device_lookup_private(&iop_cd, minor(dev));
2482 	sc->sc_flags &= ~IOP_OPEN;
2483 
2484 	return (0);
2485 }
2486 
2487 int
2488 iopioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
2489 {
2490 	struct iop_softc *sc;
2491 	struct iovec *iov;
2492 	int rv, i;
2493 
2494 	sc = device_lookup_private(&iop_cd, minor(dev));
2495 	rv = 0;
2496 
2497 	switch (cmd) {
2498 	case IOPIOCPT:
2499 		rv = kauth_authorize_device_passthru(l->l_cred, dev,
2500 		    KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
2501 		if (rv)
2502 			return (rv);
2503 
2504 		return (iop_passthrough(sc, (struct ioppt *)data, l->l_proc));
2505 
2506 	case IOPIOCGSTATUS:
2507 		iov = (struct iovec *)data;
2508 		i = sizeof(struct i2o_status);
2509 		if (i > iov->iov_len)
2510 			i = iov->iov_len;
2511 		else
2512 			iov->iov_len = i;
2513 		if ((rv = iop_status_get(sc, 0)) == 0)
2514 			rv = copyout(&sc->sc_status, iov->iov_base, i);
2515 		return (rv);
2516 
2517 	case IOPIOCGLCT:
2518 	case IOPIOCGTIDMAP:
2519 	case IOPIOCRECONFIG:
2520 		break;
2521 
2522 	default:
2523 #if defined(DIAGNOSTIC) || defined(I2ODEBUG)
2524 		printf("%s: unknown ioctl %lx\n", device_xname(sc->sc_dev), cmd);
2525 #endif
2526 		return (ENOTTY);
2527 	}
2528 
2529 	mutex_enter(&sc->sc_conflock);
2530 
2531 	switch (cmd) {
2532 	case IOPIOCGLCT:
2533 		iov = (struct iovec *)data;
2534 		i = le16toh(sc->sc_lct->tablesize) << 2;
2535 		if (i > iov->iov_len)
2536 			i = iov->iov_len;
2537 		else
2538 			iov->iov_len = i;
2539 		rv = copyout(sc->sc_lct, iov->iov_base, i);
2540 		break;
2541 
2542 	case IOPIOCRECONFIG:
2543 		rv = iop_reconfigure(sc, 0);
2544 		break;
2545 
2546 	case IOPIOCGTIDMAP:
2547 		iov = (struct iovec *)data;
2548 		i = sizeof(struct iop_tidmap) * sc->sc_nlctent;
2549 		if (i > iov->iov_len)
2550 			i = iov->iov_len;
2551 		else
2552 			iov->iov_len = i;
2553 		rv = copyout(sc->sc_tidmap, iov->iov_base, i);
2554 		break;
2555 	}
2556 
2557 	mutex_exit(&sc->sc_conflock);
2558 	return (rv);
2559 }
2560 
2561 static int
2562 iop_passthrough(struct iop_softc *sc, struct ioppt *pt, struct proc *p)
2563 {
2564 	struct iop_msg *im;
2565 	struct i2o_msg *mf;
2566 	struct ioppt_buf *ptb;
2567 	int rv, i, mapped;
2568 
2569 	mf = NULL;
2570 	im = NULL;
2571 	mapped = 0;	/* no buffers mapped yet */
2572 
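	/* Sanity-check the user-supplied message before trusting it. */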
2573 	if (pt->pt_msglen > sc->sc_framesize ||
2574 	    pt->pt_msglen < sizeof(struct i2o_msg) ||
2575 	    pt->pt_nbufs > IOP_MAX_MSG_XFERS ||
2576 	    pt->pt_nbufs < 0 ||
2577 #if 0
2578 	    pt->pt_replylen < 0 ||
2579 #endif
2580 	    pt->pt_timo < 1000 || pt->pt_timo > 5 * 60 * 1000)
2581 		return (EINVAL);
2582 
2583 	for (i = 0; i < pt->pt_nbufs; i++)
2584 		if (pt->pt_bufs[i].ptb_datalen > IOP_MAX_XFER) {
2585 			rv = ENOMEM;
2586 			goto bad;
2587 		}
2588 
2589 	mf = malloc(sc->sc_framesize, M_DEVBUF, M_WAITOK);
2590 	if (mf == NULL)
2591 		return (ENOMEM);
2592 
2593 	if ((rv = copyin(pt->pt_msg, mf, pt->pt_msglen)) != 0)
2594 		goto bad;
2595 
2596 	im = iop_msg_alloc(sc, IM_WAIT | IM_NOSTATUS);
2597 	im->im_rb = (struct i2o_reply *)mf;
2598 	mf->msgictx = IOP_ICTX;
2599 	mf->msgtctx = im->im_tctx;
2600 
2601 	for (i = 0; i < pt->pt_nbufs; i++) {
2602 		ptb = &pt->pt_bufs[i];
2603 		rv = iop_msg_map(sc, im, (u_int32_t *)mf, ptb->ptb_data,
2604 		    ptb->ptb_datalen, ptb->ptb_out != 0, p);
2605 		if (rv != 0)
2606 			goto bad;
2607 		mapped = 1;
2608 	}
2609 
2610 	if ((rv = iop_msg_post(sc, im, mf, pt->pt_timo)) != 0)
2611 		goto bad;
2612 
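	/* Reply size in bytes, clamped to the frame and the user's buffer. */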
2613 	i = (le32toh(im->im_rb->msgflags) >> 14) & ~3;
2614 	if (i > sc->sc_framesize)
2615 		i = sc->sc_framesize;
2616 	if (i > pt->pt_replylen)
2617 		i = pt->pt_replylen;
2618 	rv = copyout(im->im_rb, pt->pt_reply, i);
2619 
2620  bad:
2621 	if (mapped != 0)
2622 		iop_msg_unmap(sc, im);
2623 	if (im != NULL)
2624 		iop_msg_free(sc, im);
2625 	if (mf != NULL)
2626 		free(mf, M_DEVBUF);
2627 	return (rv);
2628 }
2629