1 /* $FreeBSD: src/sys/dev/iir/iir.c,v 1.2.2.3 2002/05/05 08:18:12 asmodai Exp $ */
2 /* $DragonFly: src/sys/dev/raid/iir/iir.c,v 1.21 2008/05/18 20:30:23 pavalos Exp $ */
3 /*
4  *       Copyright (c) 2000-01 Intel Corporation
5  *       All Rights Reserved
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions, and the following disclaimer,
12  *    without modification, immediately at the beginning of the file.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. The name of the author may not be used to endorse or promote products
17  *    derived from this software without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
23  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29  * SUCH DAMAGE.
30  */
31 
32 /*
33  * iir.c: SCSI dependent code for the Intel Integrated RAID Controller driver
34  *
35  * Written by: Achim Leubner <achim.leubner@intel.com>
36  * Fixes/Additions: Boji Tony Kannanthanam <boji.t.kannanthanam@intel.com>
37  *
38  * credits:     Niklas Hallqvist;       OpenBSD driver for the ICP Controllers.
39  *              Mike Smith;             Some driver source code.
40  *              FreeBSD.ORG;            Great O/S to work on and for.
41  *
42  * TODO:
43  */
44 
45 #ident "$Id: iir.c 1.2 2001/06/21 20:28:32 achim Exp $"
46 
47 #define _IIR_C_
48 
49 /* #include "opt_iir.h" */
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/types.h>
53 #include <sys/eventhandler.h>
54 #include <sys/malloc.h>
55 #include <sys/kernel.h>
56 #include <sys/bus.h>
57 #include <sys/thread2.h>
58 
59 #include <machine/clock.h>
60 #include <machine/stdarg.h>
61 
62 #include <bus/cam/cam.h>
63 #include <bus/cam/cam_ccb.h>
64 #include <bus/cam/cam_sim.h>
65 #include <bus/cam/cam_xpt_sim.h>
66 #include <bus/cam/cam_debug.h>
67 #include <bus/cam/scsi/scsi_all.h>
68 #include <bus/cam/scsi/scsi_message.h>
69 
70 #include <vm/vm.h>
71 #include <vm/pmap.h>
72 
73 #include "iir.h"
74 
75 struct gdt_softc *gdt_wait_gdt;
76 int     gdt_wait_index;
77 
78 #ifdef GDT_DEBUG
79 int     gdt_debug = GDT_DEBUG;
80 #ifdef __SERIAL__
81 #define MAX_SERBUF 160
82 static void ser_init(void);
83 static void ser_puts(char *str);
84 static void ser_putc(int c);
85 static char strbuf[MAX_SERBUF+1];
86 #ifdef __COM2__
87 #define COM_BASE 0x2f8
88 #else
89 #define COM_BASE 0x3f8
90 #endif
91 static void ser_init()
92 {
93     unsigned port=COM_BASE;
94 
95     outb(port+3, 0x80);
96     outb(port+1, 0);
97     /* 19200 baud; for 9600 baud use outb(port, 12) */
98     outb(port, 6);
99     outb(port+3, 3);
100     outb(port+1, 0);
101 }
102 
103 static void ser_puts(char *str)
104 {
105     char *ptr;
106 
107     ser_init();
108     for (ptr=str;*ptr;++ptr)
109         ser_putc((int)(*ptr));
110 }
111 
112 static void ser_putc(int c)
113 {
114     unsigned port=COM_BASE;
115 
116     while ((inb(port+5) & 0x20)==0);
117     outb(port, c);
118     if (c==0x0a)
119     {
120         while ((inb(port+5) & 0x20)==0);
121         outb(port, 0x0d);
122     }
123 }
124 
125 int
126 ser_kprintf(const char *fmt, ...)
127 {
128     __va_list args;
129     int i;
130 
131     __va_start(args,fmt);
132     i = kvsprintf(strbuf,fmt,args);
133     ser_puts(strbuf);
134     __va_end(args);
135     return i;
136 }
137 #endif
138 #endif
139 
140 /* The linked list of softc structures */
141 struct gdt_softc_list gdt_softcs = TAILQ_HEAD_INITIALIZER(gdt_softcs);
142 /* controller cnt. */
143 int gdt_cnt = 0;
144 /* event buffer */
145 static gdt_evt_str ebuffer[GDT_MAX_EVENTS];
146 static int elastidx, eoldidx;
147 /* statistics */
148 gdt_statist_t gdt_stat;
149 
150 /* Definitions for our use of the SIM private CCB area */
151 #define ccb_sim_ptr     spriv_ptr0
152 #define ccb_priority    spriv_field1
153 
154 static void     iir_action(struct cam_sim *sim, union ccb *ccb);
155 static void     iir_poll(struct cam_sim *sim);
156 static void     iir_shutdown(void *arg, int howto);
157 static void     iir_timeout(void *arg);
158 static void     iir_watchdog(void *arg);
159 
160 static void     gdt_eval_mapping(u_int32_t size, int *cyls, int *heads,
161                                  int *secs);
162 static int      gdt_internal_cmd(struct gdt_softc *gdt, struct gdt_ccb *gccb,
163                                  u_int8_t service, u_int16_t opcode,
164                                  u_int32_t arg1, u_int32_t arg2, u_int32_t arg3);
165 static int      gdt_wait(struct gdt_softc *gdt, struct gdt_ccb *ccb,
166                          int timeout);
167 
168 static struct gdt_ccb *gdt_get_ccb(struct gdt_softc *gdt);
169 static u_int32_t gdt_ccb_vtop(struct gdt_softc *gdt,
170                               struct gdt_ccb *gccb);
171 
172 static int      gdt_sync_event(struct gdt_softc *gdt, int service,
173                                u_int8_t index, struct gdt_ccb *gccb);
174 static int      gdt_async_event(struct gdt_softc *gdt, int service);
175 static struct gdt_ccb *gdt_raw_cmd(struct gdt_softc *gdt,  union ccb *ccb);
176 static struct gdt_ccb *gdt_cache_cmd(struct gdt_softc *gdt, union ccb *ccb);
177 static struct gdt_ccb *gdt_ioctl_cmd(struct gdt_softc *gdt, gdt_ucmd_t *ucmd);
178 static void     gdt_internal_cache_cmd(struct gdt_softc *gdt,union ccb *ccb);
179 
180 static void     gdtmapmem(void *arg, bus_dma_segment_t *dm_segs,
181                           int nseg, int error);
182 static void     gdtexecuteccb(void *arg, bus_dma_segment_t *dm_segs,
183                               int nseg, int error);
184 
185 int
186 iir_init(struct gdt_softc *gdt)
187 {
188     u_int16_t cdev_cnt;
189     int i, id, drv_cyls, drv_hds, drv_secs;
190     struct gdt_ccb *gccb;
191 
192     GDT_DPRINTF(GDT_D_DEBUG, ("iir_init()\n"));
193 
194     gdt->sc_state = GDT_POLLING;
195     gdt_clear_events();
196     bzero(&gdt_stat, sizeof(gdt_statist_t));
197 
198     SLIST_INIT(&gdt->sc_free_gccb);
199     SLIST_INIT(&gdt->sc_pending_gccb);
200     TAILQ_INIT(&gdt->sc_ccb_queue);
201     TAILQ_INIT(&gdt->sc_ucmd_queue);
202     TAILQ_INSERT_TAIL(&gdt_softcs, gdt, links);
203 
204     /* DMA tag for mapping buffers into device visible space. */
205     if (bus_dma_tag_create(gdt->sc_parent_dmat, /*alignment*/1, /*boundary*/0,
206                            /*lowaddr*/BUS_SPACE_MAXADDR,
207                            /*highaddr*/BUS_SPACE_MAXADDR,
208                            /*filter*/NULL, /*filterarg*/NULL,
209                            /*maxsize*/MAXBSIZE, /*nsegments*/GDT_MAXSG,
210                            /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
211                            /*flags*/BUS_DMA_ALLOCNOW,
212                            &gdt->sc_buffer_dmat) != 0) {
213         kprintf("iir%d: bus_dma_tag_create(...,gdt->sc_buffer_dmat) failed\n",
214                gdt->sc_hanum);
215         return (1);
216     }
217     gdt->sc_init_level++;
218 
219     /* DMA tag for our ccb structures */
220     if (bus_dma_tag_create(gdt->sc_parent_dmat, /*alignment*/1, /*boundary*/0,
221                            /*lowaddr*/BUS_SPACE_MAXADDR,
222                            /*highaddr*/BUS_SPACE_MAXADDR,
223                            /*filter*/NULL, /*filterarg*/NULL,
224                            GDT_MAXCMDS * sizeof(struct gdt_ccb),
225                            /*nsegments*/1,
226                            /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
227                            /*flags*/0, &gdt->sc_gccb_dmat) != 0) {
228         kprintf("iir%d: bus_dma_tag_create(...,gdt->sc_gccb_dmat) failed\n",
229                gdt->sc_hanum);
230         return (1);
231     }
232     gdt->sc_init_level++;
233 
234     /* Allocation for our ccbs */
235     if (bus_dmamem_alloc(gdt->sc_gccb_dmat, (void **)&gdt->sc_gccbs,
236                          BUS_DMA_NOWAIT, &gdt->sc_gccb_dmamap) != 0) {
237         kprintf("iir%d: bus_dmamem_alloc(...,&gdt->sc_gccbs,...) failed\n",
238                gdt->sc_hanum);
239         return (1);
240     }
241     gdt->sc_init_level++;
242 
243     /* And permanently map them */
244     bus_dmamap_load(gdt->sc_gccb_dmat, gdt->sc_gccb_dmamap,
245                     gdt->sc_gccbs, GDT_MAXCMDS * sizeof(struct gdt_ccb),
246                     gdtmapmem, &gdt->sc_gccb_busbase, /*flags*/0);
247     gdt->sc_init_level++;
248 
249     /* Clear them out. */
250     bzero(gdt->sc_gccbs, GDT_MAXCMDS * sizeof(struct gdt_ccb));
251 
252     /* Initialize the ccbs */
253     for (i = GDT_MAXCMDS-1; i >= 0; i--) {
254         gdt->sc_gccbs[i].gc_cmd_index = i + 2;
255         gdt->sc_gccbs[i].gc_flags = GDT_GCF_UNUSED;
256         gdt->sc_gccbs[i].gc_map_flag = FALSE;
257         if (bus_dmamap_create(gdt->sc_buffer_dmat, /*flags*/0,
258                               &gdt->sc_gccbs[i].gc_dmamap) != 0)
259             return(1);
260         gdt->sc_gccbs[i].gc_map_flag = TRUE;
261         SLIST_INSERT_HEAD(&gdt->sc_free_gccb, &gdt->sc_gccbs[i], sle);
262     }
263     gdt->sc_init_level++;
264 
265     /* create the control device */
266     gdt->sc_dev = gdt_make_dev(gdt->sc_hanum);
267 
268     /* allocate ccb for gdt_internal_cmd() */
269     gccb = gdt_get_ccb(gdt);
270     if (gccb == NULL) {
271         kprintf("iir%d: No free command index found\n",
272                gdt->sc_hanum);
273         return (1);
274     }
275 
276     if (!gdt_internal_cmd(gdt, gccb, GDT_SCREENSERVICE, GDT_INIT,
277                           0, 0, 0)) {
278         kprintf("iir%d: Screen service initialization error %d\n",
279                gdt->sc_hanum, gdt->sc_status);
280         gdt_free_ccb(gdt, gccb);
281         return (1);
282     }
283 
284     if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INIT,
285                           GDT_LINUX_OS, 0, 0)) {
286         kprintf("iir%d: Cache service initialization error %d\n",
287                gdt->sc_hanum, gdt->sc_status);
288         gdt_free_ccb(gdt, gccb);
289         return (1);
290     }
291     gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_UNFREEZE_IO,
292                      0, 0, 0);
293 
294     if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_MOUNT,
295                           0xffff, 1, 0)) {
296         kprintf("iir%d: Cache service mount error %d\n",
297                gdt->sc_hanum, gdt->sc_status);
298         gdt_free_ccb(gdt, gccb);
299         return (1);
300     }
301 
302     if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INIT,
303                           GDT_LINUX_OS, 0, 0)) {
304         kprintf("iir%d: Cache service post-mount initialization error %d\n",
305                gdt->sc_hanum, gdt->sc_status);
306         gdt_free_ccb(gdt, gccb);
307         return (1);
308     }
309     cdev_cnt = (u_int16_t)gdt->sc_info;
310     gdt->sc_fw_vers = gdt->sc_service;
311 
312     /* Detect number of buses */
313     gdt_enc32(gccb->gc_scratch + GDT_IOC_VERSION, GDT_IOC_NEWEST);
314     gccb->gc_scratch[GDT_IOC_LIST_ENTRIES] = GDT_MAXBUS;
315     gccb->gc_scratch[GDT_IOC_FIRST_CHAN] = 0;
316     gccb->gc_scratch[GDT_IOC_LAST_CHAN] = GDT_MAXBUS - 1;
317     gdt_enc32(gccb->gc_scratch + GDT_IOC_LIST_OFFSET, GDT_IOC_HDR_SZ);
318     if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL,
319                          GDT_IOCHAN_RAW_DESC, GDT_INVALID_CHANNEL,
320                          GDT_IOC_HDR_SZ + GDT_MAXBUS * GDT_RAWIOC_SZ)) {
321         gdt->sc_bus_cnt = gccb->gc_scratch[GDT_IOC_CHAN_COUNT];
322         for (i = 0; i < gdt->sc_bus_cnt; i++) {
323             id = gccb->gc_scratch[GDT_IOC_HDR_SZ +
324                                  i * GDT_RAWIOC_SZ + GDT_RAWIOC_PROC_ID];
325             gdt->sc_bus_id[i] = id < GDT_MAXID_FC ? id : 0xff;
326         }
327     } else {
328         /* New method failed, use fallback. */
329         for (i = 0; i < GDT_MAXBUS; i++) {
330             gdt_enc32(gccb->gc_scratch + GDT_GETCH_CHANNEL_NO, i);
331             if (!gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_IOCTL,
332                                   GDT_SCSI_CHAN_CNT | GDT_L_CTRL_PATTERN,
333                                   GDT_IO_CHANNEL | GDT_INVALID_CHANNEL,
334                                   GDT_GETCH_SZ)) {
335                 if (i == 0) {
336                     kprintf("iir%d: Cannot get channel count, "
337                            "error %d\n", gdt->sc_hanum, gdt->sc_status);
338                     gdt_free_ccb(gdt, gccb);
339                     return (1);
340                 }
341                 break;
342             }
343             gdt->sc_bus_id[i] =
344                 (gccb->gc_scratch[GDT_GETCH_SIOP_ID] < GDT_MAXID_FC) ?
345                 gccb->gc_scratch[GDT_GETCH_SIOP_ID] : 0xff;
346         }
347         gdt->sc_bus_cnt = i;
348     }
349     /* add one "virtual" channel for the host drives */
350     gdt->sc_virt_bus = gdt->sc_bus_cnt;
351     gdt->sc_bus_cnt++;
352 
353     if (!gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_INIT,
354                           0, 0, 0)) {
355             kprintf("iir%d: Raw service initialization error %d\n",
356                    gdt->sc_hanum, gdt->sc_status);
357             gdt_free_ccb(gdt, gccb);
358             return (1);
359     }
360 
361     /* Set/get features raw service (scatter/gather) */
362     gdt->sc_raw_feat = 0;
363     if (gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_SET_FEAT,
364                          GDT_SCATTER_GATHER, 0, 0)) {
365         if (gdt_internal_cmd(gdt, gccb, GDT_SCSIRAWSERVICE, GDT_GET_FEAT,
366                              0, 0, 0)) {
367             gdt->sc_raw_feat = gdt->sc_info;
368             if (!(gdt->sc_info & GDT_SCATTER_GATHER)) {
369                 panic("iir%d: Scatter/Gather Raw Service "
370                       "required but not supported!\n", gdt->sc_hanum);
371                 gdt_free_ccb(gdt, gccb);
372                 return (1);
373             }
374         }
375     }
376 
377     /* Set/get features cache service (scatter/gather) */
378     gdt->sc_cache_feat = 0;
379     if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_SET_FEAT,
380                          0, GDT_SCATTER_GATHER, 0)) {
381         if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_GET_FEAT,
382                              0, 0, 0)) {
383             gdt->sc_cache_feat = gdt->sc_info;
384             if (!(gdt->sc_info & GDT_SCATTER_GATHER)) {
385                 panic("iir%d: Scatter/Gather Cache Service "
386                   "required but not supported!\n", gdt->sc_hanum);
387                 gdt_free_ccb(gdt, gccb);
388                 return (1);
389             }
390         }
391     }
392 
393     /* Scan for cache devices */
394     for (i = 0; i < cdev_cnt && i < GDT_MAX_HDRIVES; i++) {
395         if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE, GDT_INFO,
396                              i, 0, 0)) {
397             gdt->sc_hdr[i].hd_present = 1;
398             gdt->sc_hdr[i].hd_size = gdt->sc_info;
399 
400             /*
401              * Evaluate mapping (sectors per head, heads per cyl)
402              */
403             gdt->sc_hdr[i].hd_size &= ~GDT_SECS32;
404             if (gdt->sc_info2 == 0)
405                 gdt_eval_mapping(gdt->sc_hdr[i].hd_size,
406                                  &drv_cyls, &drv_hds, &drv_secs);
407             else {
408                 drv_hds = gdt->sc_info2 & 0xff;
409                 drv_secs = (gdt->sc_info2 >> 8) & 0xff;
410                 drv_cyls = gdt->sc_hdr[i].hd_size / drv_hds /
411                     drv_secs;
412             }
413             gdt->sc_hdr[i].hd_heads = drv_hds;
414             gdt->sc_hdr[i].hd_secs = drv_secs;
415             /* Round the size */
416             gdt->sc_hdr[i].hd_size = drv_cyls * drv_hds * drv_secs;
417 
418             if (gdt_internal_cmd(gdt, gccb, GDT_CACHESERVICE,
419                                  GDT_DEVTYPE, i, 0, 0))
420                 gdt->sc_hdr[i].hd_devtype = gdt->sc_info;
421         }
422     }
423 
424     GDT_DPRINTF(GDT_D_INIT, ("dpmem %x %d-bus %d cache device%s\n",
425                              gdt->sc_dpmembase,
426                              gdt->sc_bus_cnt, cdev_cnt,
427                              cdev_cnt == 1 ? "" : "s"));
428     gdt_free_ccb(gdt, gccb);
429 
430     gdt_cnt++;
431     return (0);
432 }
433 
434 void
435 iir_free(struct gdt_softc *gdt)
436 {
437     int i;
438 
439     GDT_DPRINTF(GDT_D_INIT, ("iir_free()\n"));
440 
441     switch (gdt->sc_init_level) {
442       default:
443         gdt_destroy_dev(gdt->sc_dev);
444       case 5:
445         for (i = GDT_MAXCMDS-1; i >= 0; i--)
446             if (gdt->sc_gccbs[i].gc_map_flag)
447                 bus_dmamap_destroy(gdt->sc_buffer_dmat,
448                                    gdt->sc_gccbs[i].gc_dmamap);
449         bus_dmamap_unload(gdt->sc_gccb_dmat, gdt->sc_gccb_dmamap);
450       case 4:
451         bus_dmamem_free(gdt->sc_gccb_dmat, gdt->sc_gccbs, gdt->sc_gccb_dmamap);
452       case 3:
453         bus_dma_tag_destroy(gdt->sc_gccb_dmat);
454       case 2:
455         bus_dma_tag_destroy(gdt->sc_buffer_dmat);
456       case 1:
457         bus_dma_tag_destroy(gdt->sc_parent_dmat);
458       case 0:
459         break;
460     }
461     TAILQ_REMOVE(&gdt_softcs, gdt, links);
462 }
463 
464 void
465 iir_attach(struct gdt_softc *gdt)
466 {
467     struct cam_devq *devq;
468     int i;
469 
470     GDT_DPRINTF(GDT_D_INIT, ("iir_attach()\n"));
471 
472     callout_init(&gdt->watchdog_timer);
473     /*
474      * Create the device queue for our SIM.
475      */
476     devq = cam_simq_alloc(GDT_MAXCMDS);
477     if (devq == NULL)
478         return;
479 
480     for (i = 0; i < gdt->sc_bus_cnt; i++) {
481         /*
482          * Construct our SIM entry
483          */
484         gdt->sims[i] = cam_sim_alloc(iir_action, iir_poll, "iir",
485                                      gdt, gdt->sc_hanum, &sim_mplock,
486 				     /*untagged*/2,
487                                      /*tagged*/GDT_MAXCMDS, devq);
488         if (xpt_bus_register(gdt->sims[i], i) != CAM_SUCCESS) {
489             cam_sim_free(gdt->sims[i]);
490             break;
491         }
492 
493         if (xpt_create_path(&gdt->paths[i], /*periph*/NULL,
494                             cam_sim_path(gdt->sims[i]),
495                             CAM_TARGET_WILDCARD,
496                             CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
497             xpt_bus_deregister(cam_sim_path(gdt->sims[i]));
498             cam_sim_free(gdt->sims[i]);
499             break;
500         }
501     }
502     cam_simq_release(devq);
503     if (i > 0)
504         EVENTHANDLER_REGISTER(shutdown_post_sync, iir_shutdown,
505                               gdt, SHUTDOWN_PRI_DRIVER);
506     /* iir_watchdog(gdt); */
507     gdt->sc_state = GDT_NORMAL;
508 }
509 
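/*
 * Derive a CHS geometry for a host drive from its size in sectors, trying
 * the 64*32 mapping first and falling back to larger geometries when the
 * cylinder count gets too big.  Illustrative example (the sector count is
 * made up, not taken from real hardware): a 4194304-sector (2 GB) drive
 * gives 4194304/64/32 = 2048 cylinders; if that exceeds GDT_MAXCYLS, the
 * 127*63 mapping is tried, 4194304/127/63 = 524 cylinders, and only drives
 * still too large for that end up with the GDT_BIGHEADS/GDT_BIGSECS mapping.
 */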
510 static void
511 gdt_eval_mapping(u_int32_t size, int *cyls, int *heads, int *secs)
512 {
513     *cyls = size / GDT_HEADS / GDT_SECS;
514     if (*cyls < GDT_MAXCYLS) {
515         *heads = GDT_HEADS;
516         *secs = GDT_SECS;
517     } else {
518         /* Too high for 64 * 32 */
519         *cyls = size / GDT_MEDHEADS / GDT_MEDSECS;
520         if (*cyls < GDT_MAXCYLS) {
521             *heads = GDT_MEDHEADS;
522             *secs = GDT_MEDSECS;
523         } else {
524             /* Too high for 127 * 63 */
525             *cyls = size / GDT_BIGHEADS / GDT_BIGSECS;
526             *heads = GDT_BIGHEADS;
527             *secs = GDT_BIGSECS;
528         }
529     }
530 }
531 
532 static int
533 gdt_wait(struct gdt_softc *gdt, struct gdt_ccb *gccb,
534          int timeout)
535 {
536     int rv = 0;
537 
538     GDT_DPRINTF(GDT_D_INIT,
539                 ("gdt_wait(%p, %p, %d)\n", gdt, gccb, timeout));
540 
541     gdt->sc_state |= GDT_POLL_WAIT;
542     do {
543         iir_intr(gdt);
544         if (gdt == gdt_wait_gdt &&
545             gccb->gc_cmd_index == gdt_wait_index) {
546             rv = 1;
547             break;
548         }
549         DELAY(1);
550     } while (--timeout);
551     gdt->sc_state &= ~GDT_POLL_WAIT;
552 
553     while (gdt->sc_test_busy(gdt))
554         DELAY(1);               /* XXX correct? */
555 
556     return (rv);
557 }
558 
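/*
 * Polled firmware command, used only while interrupts are unavailable
 * (initialization and shutdown).  The request is encoded into sc_cmd,
 * copied to the controller, and the event is released; gdt_wait() then
 * spins on iir_intr() until the controller posts this command index or the
 * timeout expires.  The command is retried while the controller reports
 * GDT_S_BSY, and the final firmware status is left in gdt->sc_status.
 */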
559 static int
560 gdt_internal_cmd(struct gdt_softc *gdt, struct gdt_ccb *gccb,
561                  u_int8_t service, u_int16_t opcode,
562                  u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
563 {
564     int retries;
565 
566     GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cmd(%p, %d, %d, %d, %d, %d)\n",
567                             gdt, service, opcode, arg1, arg2, arg3));
568 
569     bzero(gdt->sc_cmd, GDT_CMD_SZ);
570 
571     for (retries = GDT_RETRIES; ; ) {
572         gccb->gc_service = service;
573         gccb->gc_flags = GDT_GCF_INTERNAL;
574 
575         gdt->sc_set_sema0(gdt);
576         gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
577                   gccb->gc_cmd_index);
578         gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, opcode);
579 
580         switch (service) {
581           case GDT_CACHESERVICE:
582             if (opcode == GDT_IOCTL) {
583                 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
584                           GDT_IOCTL_SUBFUNC, arg1);
585                 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
586                           GDT_IOCTL_CHANNEL, arg2);
587                 gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION +
588                           GDT_IOCTL_PARAM_SIZE, (u_int16_t)arg3);
589                 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_P_PARAM,
590                           gdt_ccb_vtop(gdt, gccb) +
591                           offsetof(struct gdt_ccb, gc_scratch[0]));
592             } else {
593                 gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION +
594                           GDT_CACHE_DEVICENO, (u_int16_t)arg1);
595                 gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
596                           GDT_CACHE_BLOCKNO, arg2);
597             }
598             break;
599 
600           case GDT_SCSIRAWSERVICE:
601             gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION +
602                       GDT_RAW_DIRECTION, arg1);
603             gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
604                 (u_int8_t)arg2;
605             gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
606                 (u_int8_t)arg3;
607             gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
608                 (u_int8_t)(arg3 >> 8);
609         }
610 
611         gdt->sc_cmd_len = GDT_CMD_SZ;
612         gdt->sc_cmd_off = 0;
613         gdt->sc_cmd_cnt = 0;
614         gdt->sc_copy_cmd(gdt, gccb);
615         gdt->sc_release_event(gdt);
616         DELAY(20);
617         if (!gdt_wait(gdt, gccb, GDT_POLL_TIMEOUT))
618             return (0);
619         if (gdt->sc_status != GDT_S_BSY || --retries == 0)
620             break;
621         DELAY(1);
622     }
623     return (gdt->sc_status == GDT_S_OK);
624 }
625 
626 static struct gdt_ccb *
627 gdt_get_ccb(struct gdt_softc *gdt)
628 {
629     struct gdt_ccb *gccb;
630 
631     GDT_DPRINTF(GDT_D_QUEUE, ("gdt_get_ccb(%p)\n", gdt));
632 
633     crit_enter();
634     gccb = SLIST_FIRST(&gdt->sc_free_gccb);
635     if (gccb != NULL) {
636         SLIST_REMOVE_HEAD(&gdt->sc_free_gccb, sle);
637         SLIST_INSERT_HEAD(&gdt->sc_pending_gccb, gccb, sle);
638         ++gdt_stat.cmd_index_act;
639         if (gdt_stat.cmd_index_act > gdt_stat.cmd_index_max)
640             gdt_stat.cmd_index_max = gdt_stat.cmd_index_act;
641     }
642     crit_exit();
643     return (gccb);
644 }
645 
646 void
647 gdt_free_ccb(struct gdt_softc *gdt, struct gdt_ccb *gccb)
648 {
649     GDT_DPRINTF(GDT_D_QUEUE, ("gdt_free_ccb(%p, %p)\n", gdt, gccb));
650 
651     crit_enter();
652     gccb->gc_flags = GDT_GCF_UNUSED;
653     SLIST_REMOVE(&gdt->sc_pending_gccb, gccb, gdt_ccb, sle);
654     SLIST_INSERT_HEAD(&gdt->sc_free_gccb, gccb, sle);
655     --gdt_stat.cmd_index_act;
656     crit_exit();
657     if (gdt->sc_state & GDT_SHUTDOWN)
658         wakeup(gccb);
659 }
660 
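/*
 * Translate a gdt_ccb pointer into the 32-bit bus address the controller
 * expects.  All gccbs live in one contiguous, permanently mapped DMA
 * allocation (sc_gccbs, whose bus base sc_gccb_busbase is recorded by
 * gdtmapmem() during iir_init()), so the translation is simply the base
 * plus the ccb's byte offset within that array.
 */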
661 static u_int32_t
662 gdt_ccb_vtop(struct gdt_softc *gdt, struct gdt_ccb *gccb)
663 {
664     return (gdt->sc_gccb_busbase
665             + (u_int32_t)((caddr_t)gccb - (caddr_t)gdt->sc_gccbs));
666 }
667 
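/*
 * Main request dispatcher: drain the user-command (IOCTL) queue and the CAM
 * CCB queue while the controller can accept commands.  Pass-through targets
 * on real buses go to the raw service, READ/WRITE to host drives on the
 * virtual bus go to the cache service, and the remaining host-drive commands
 * (INQUIRY, READ CAPACITY, MODE SENSE, ...) are answered locally by
 * gdt_internal_cache_cmd().  In polling mode the function also waits for the
 * issued command to complete.
 */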
668 void
669 gdt_next(struct gdt_softc *gdt)
670 {
671     union ccb *ccb;
672     gdt_ucmd_t *ucmd;
673     struct cam_sim *sim;
674     int bus, target, lun;
675     int next_cmd;
676 
677     struct ccb_scsiio *csio;
678     struct ccb_hdr *ccbh;
679     struct gdt_ccb *gccb = NULL;
680     u_int8_t cmd;
681 
682     GDT_DPRINTF(GDT_D_QUEUE, ("gdt_next(%p)\n", gdt));
683 
684     crit_enter();
685     if (gdt->sc_test_busy(gdt)) {
686         if (!(gdt->sc_state & GDT_POLLING)) {
687 	    crit_exit();
688             return;
689         }
690         while (gdt->sc_test_busy(gdt))
691             DELAY(1);
692     }
693 
694     gdt->sc_cmd_cnt = gdt->sc_cmd_off = 0;
695     next_cmd = TRUE;
696     for (;;) {
697         /* I/Os in queue? controller ready? */
698         if (!TAILQ_FIRST(&gdt->sc_ucmd_queue) &&
699             !TAILQ_FIRST(&gdt->sc_ccb_queue))
700             break;
701 
702         /* 1.: I/Os without ccb (IOCTLs) */
703         ucmd = TAILQ_FIRST(&gdt->sc_ucmd_queue);
704         if (ucmd != NULL) {
705             TAILQ_REMOVE(&gdt->sc_ucmd_queue, ucmd, links);
706             if ((gccb = gdt_ioctl_cmd(gdt, ucmd)) == NULL) {
707                 TAILQ_INSERT_HEAD(&gdt->sc_ucmd_queue, ucmd, links);
708                 break;
709             }
710             break;
711             /* if multiple commands were allowed: if (!gdt_polling) continue; */
712         }
713 
714         /* 2.: I/Os with ccb */
715         ccb = (union ccb *)TAILQ_FIRST(&gdt->sc_ccb_queue);
716         /* always != NULL here, since it was tested above */
717         sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
718         bus = cam_sim_bus(sim);
719         target = ccb->ccb_h.target_id;
720         lun = ccb->ccb_h.target_lun;
721 
722         TAILQ_REMOVE(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
723         --gdt_stat.req_queue_act;
724         /* ccb->ccb_h.func_code is XPT_SCSI_IO */
725         GDT_DPRINTF(GDT_D_QUEUE, ("XPT_SCSI_IO flags 0x%x\n",
726                                   ccb->ccb_h.flags));
727         csio = &ccb->csio;
728         ccbh = &ccb->ccb_h;
729         cmd  = csio->cdb_io.cdb_bytes[0];
730         /* Max CDB length is 12 bytes */
731         if (csio->cdb_len > 12) {
732             ccbh->status = CAM_REQ_INVALID;
733             --gdt_stat.io_count_act;
734             xpt_done(ccb);
735         } else if (bus != gdt->sc_virt_bus) {
736             /* raw service command */
737             if ((gccb = gdt_raw_cmd(gdt, ccb)) == NULL) {
738                 TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h,
739                                   sim_links.tqe);
740                 ++gdt_stat.req_queue_act;
741                 if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
742                     gdt_stat.req_queue_max = gdt_stat.req_queue_act;
743                 next_cmd = FALSE;
744             }
745         } else if (target >= GDT_MAX_HDRIVES ||
746                    !gdt->sc_hdr[target].hd_present || lun != 0) {
747             ccbh->status = CAM_SEL_TIMEOUT;
748             --gdt_stat.io_count_act;
749             xpt_done(ccb);
750         } else {
751             /* cache service command */
752             if (cmd == READ_6  || cmd == WRITE_6 ||
753                 cmd == READ_10 || cmd == WRITE_10) {
754                 if ((gccb = gdt_cache_cmd(gdt, ccb)) == NULL) {
755                     TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h,
756                                       sim_links.tqe);
757                     ++gdt_stat.req_queue_act;
758                     if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
759                         gdt_stat.req_queue_max = gdt_stat.req_queue_act;
760                     next_cmd = FALSE;
761                 }
762             } else {
763 		crit_exit();
764                 gdt_internal_cache_cmd(gdt, ccb);
765 		crit_enter();
766             }
767         }
768         if ((gdt->sc_state & GDT_POLLING) || !next_cmd)
769             break;
770     }
771     if (gdt->sc_cmd_cnt > 0)
772         gdt->sc_release_event(gdt);
773 
774     crit_exit();
775 
776     if ((gdt->sc_state & GDT_POLLING) && gdt->sc_cmd_cnt > 0) {
777         gdt_wait(gdt, gccb, GDT_POLL_TIMEOUT);
778     }
779 }
780 
781 static struct gdt_ccb *
782 gdt_raw_cmd(struct gdt_softc *gdt, union ccb *ccb)
783 {
784     struct gdt_ccb *gccb;
785     struct cam_sim *sim;
786 
787     GDT_DPRINTF(GDT_D_CMD, ("gdt_raw_cmd(%p, %p)\n", gdt, ccb));
788 
789     if (roundup(GDT_CMD_UNION + GDT_RAW_SZ, sizeof(u_int32_t)) +
790         gdt->sc_cmd_off + GDT_DPMEM_COMMAND_OFFSET >
791         gdt->sc_ic_all_size) {
792         GDT_DPRINTF(GDT_D_INVALID, ("iir%d: gdt_raw_cmd(): DPMEM overflow\n",
793                                     gdt->sc_hanum));
794         return (NULL);
795     }
796 
797     bzero(gdt->sc_cmd, GDT_CMD_SZ);
798 
799     gccb = gdt_get_ccb(gdt);
800     if (gccb == NULL) {
801         GDT_DPRINTF(GDT_D_INVALID, ("iir%d: No free command index found\n",
802                                     gdt->sc_hanum));
803         return (gccb);
804     }
805     sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
806     gccb->gc_ccb = ccb;
807     gccb->gc_service = GDT_SCSIRAWSERVICE;
808     gccb->gc_flags = GDT_GCF_SCSI;
809 
810     if (gdt->sc_cmd_cnt == 0)
811         gdt->sc_set_sema0(gdt);
812     crit_exit();
813     gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
814               gccb->gc_cmd_index);
815     gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_WRITE);
816 
817     gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_DIRECTION,
818               (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ?
819               GDT_DATA_IN : GDT_DATA_OUT);
820     gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDLEN,
821               ccb->csio.dxfer_len);
822     gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CLEN,
823               ccb->csio.cdb_len);
824     bcopy(ccb->csio.cdb_io.cdb_bytes, gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CMD,
825           ccb->csio.cdb_len);
826     gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
827         ccb->ccb_h.target_id;
828     gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
829         ccb->ccb_h.target_lun;
830     gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
831         cam_sim_bus(sim);
832     gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_LEN,
833               sizeof(struct scsi_sense_data));
834     gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_DATA,
835               gdt_ccb_vtop(gdt, gccb) +
836               offsetof(struct gdt_ccb, gc_scratch[0]));
837 
838     /*
839      * If we have any data to send with this command,
840      * map it into bus space.
841      */
842     /* Only use S/G if there is a transfer */
843     if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
844         if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
845             if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
846                 int error;
847 
848                 /* unlock of splcam() beforehand ??? */
849 		crit_enter();
850                 error =
851                     bus_dmamap_load(gdt->sc_buffer_dmat,
852                                     gccb->gc_dmamap,
853                                     ccb->csio.data_ptr,
854                                     ccb->csio.dxfer_len,
855                                     gdtexecuteccb,
856                                     gccb, /*flags*/0);
857                 if (error == EINPROGRESS) {
858                     xpt_freeze_simq(sim, 1);
859                     gccb->gc_state |= CAM_RELEASE_SIMQ;
860                 }
861 		crit_exit();
862             } else {
863                 struct bus_dma_segment seg;
864 
865                 /* Pointer to physical buffer */
866                 seg.ds_addr =
867                     (bus_addr_t)ccb->csio.data_ptr;
868                 seg.ds_len = ccb->csio.dxfer_len;
869                 gdtexecuteccb(gccb, &seg, 1, 0);
870             }
871         } else {
872             struct bus_dma_segment *segs;
873 
874             if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0)
875                 panic("iir%d: iir_action - Physical "
876                       "segment pointers unsupported", gdt->sc_hanum);
877 
878             if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS)==0)
879                 panic("iir%d: iir_action - Virtual "
880                       "segment addresses unsupported", gdt->sc_hanum);
881 
882             /* Just use the segments provided */
883             segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
884             gdtexecuteccb(gccb, segs, ccb->csio.sglist_cnt, 0);
885         }
886     } else {
887         gdtexecuteccb(gccb, NULL, 0, 0);
888     }
889 
890     crit_enter();
891     return (gccb);
892 }
893 
894 static struct gdt_ccb *
895 gdt_cache_cmd(struct gdt_softc *gdt, union ccb *ccb)
896 {
897     struct gdt_ccb *gccb;
898     struct cam_sim *sim;
899     u_int8_t *cmdp;
900     u_int16_t opcode;
901     u_int32_t blockno, blockcnt;
902 
903     GDT_DPRINTF(GDT_D_CMD, ("gdt_cache_cmd(%p, %p)\n", gdt, ccb));
904 
905     if (roundup(GDT_CMD_UNION + GDT_CACHE_SZ, sizeof(u_int32_t)) +
906         gdt->sc_cmd_off + GDT_DPMEM_COMMAND_OFFSET >
907         gdt->sc_ic_all_size) {
908         GDT_DPRINTF(GDT_D_INVALID, ("iir%d: gdt_cache_cmd(): DPMEM overflow\n",
909                                     gdt->sc_hanum));
910         return (NULL);
911     }
912 
913     bzero(gdt->sc_cmd, GDT_CMD_SZ);
914 
915     gccb = gdt_get_ccb(gdt);
916     if (gccb == NULL) {
917         GDT_DPRINTF(GDT_D_DEBUG, ("iir%d: No free command index found\n",
918                                   gdt->sc_hanum));
919         return (gccb);
920     }
921     sim = (struct cam_sim *)ccb->ccb_h.ccb_sim_ptr;
922     gccb->gc_ccb = ccb;
923     gccb->gc_service = GDT_CACHESERVICE;
924     gccb->gc_flags = GDT_GCF_SCSI;
925 
926     if (gdt->sc_cmd_cnt == 0)
927         gdt->sc_set_sema0(gdt);
928     crit_exit();
929     gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
930               gccb->gc_cmd_index);
931     cmdp = ccb->csio.cdb_io.cdb_bytes;
932     opcode = (*cmdp == WRITE_6 || *cmdp == WRITE_10) ? GDT_WRITE : GDT_READ;
933     if ((gdt->sc_state & GDT_SHUTDOWN) && opcode == GDT_WRITE)
934         opcode = GDT_WRITE_THR;
935     gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, opcode);
936 
937     gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
938               ccb->ccb_h.target_id);
939     if (ccb->csio.cdb_len == 6) {
940         struct scsi_rw_6 *rw = (struct scsi_rw_6 *)cmdp;
941         blockno = scsi_3btoul(rw->addr) & ((SRW_TOPADDR<<16) | 0xffff);
942         blockcnt = rw->length ? rw->length : 0x100;
943     } else {
944         struct scsi_rw_10 *rw = (struct scsi_rw_10 *)cmdp;
945         blockno = scsi_4btoul(rw->addr);
946         blockcnt = scsi_2btoul(rw->length);
947     }
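    /*
     * Illustrative decode (CDB values made up): a READ_10 whose 4-byte LBA
     * is 0x00001000 and whose 2-byte length is 0x0008 yields blockno 4096
     * and blockcnt 8.  For READ_6/WRITE_6 a length byte of 0 means 256
     * blocks, hence the "rw->length ? rw->length : 0x100" above.
     */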
948     gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
949               blockno);
950     gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
951               blockcnt);
952 
953     /*
954      * If we have any data to send with this command,
955      * map it into bus space.
956      */
957     /* Only use S/G if there is a transfer */
958     if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
959         if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
960             int error;
961 
962             /* unlock of splcam() beforehand ??? */
963 	    crit_enter();
964             error =
965                 bus_dmamap_load(gdt->sc_buffer_dmat,
966                                 gccb->gc_dmamap,
967                                 ccb->csio.data_ptr,
968                                 ccb->csio.dxfer_len,
969                                 gdtexecuteccb,
970                                 gccb, /*flags*/0);
971             if (error == EINPROGRESS) {
972                 xpt_freeze_simq(sim, 1);
973                 gccb->gc_state |= CAM_RELEASE_SIMQ;
974             }
975 	    crit_exit();
976         } else {
977             struct bus_dma_segment seg;
978 
979             /* Pointer to physical buffer */
980             seg.ds_addr =
981                 (bus_addr_t)ccb->csio.data_ptr;
982             seg.ds_len = ccb->csio.dxfer_len;
983             gdtexecuteccb(gccb, &seg, 1, 0);
984         }
985     } else {
986         struct bus_dma_segment *segs;
987 
988         if ((ccb->ccb_h.flags & CAM_DATA_PHYS) != 0)
989             panic("iir%d: iir_action - Physical "
990                   "segment pointers unsupported", gdt->sc_hanum);
991 
992         if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS)==0)
993             panic("iir%d: iir_action - Virtual "
994                   "segment addresses unsupported", gdt->sc_hanum);
995 
996         /* Just use the segments provided */
997         segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
998         gdtexecuteccb(gccb, segs, ccb->csio.sglist_cnt, 0);
999     }
1000 
1001     crit_enter();
1002     return (gccb);
1003 }
1004 
1005 static struct gdt_ccb *
1006 gdt_ioctl_cmd(struct gdt_softc *gdt, gdt_ucmd_t *ucmd)
1007 {
1008     struct gdt_ccb *gccb;
1009     u_int32_t cnt;
1010 
1011     GDT_DPRINTF(GDT_D_DEBUG, ("gdt_ioctl_cmd(%p, %p)\n", gdt, ucmd));
1012 
1013     bzero(gdt->sc_cmd, GDT_CMD_SZ);
1014 
1015     gccb = gdt_get_ccb(gdt);
1016     if (gccb == NULL) {
1017         GDT_DPRINTF(GDT_D_DEBUG, ("iir%d: No free command index found\n",
1018                                   gdt->sc_hanum));
1019         return (gccb);
1020     }
1021     gccb->gc_ucmd = ucmd;
1022     gccb->gc_service = ucmd->service;
1023     gccb->gc_flags = GDT_GCF_IOCTL;
1024 
1025     /* check DPMEM space, copy data buffer from user space */
1026     if (ucmd->service == GDT_CACHESERVICE) {
1027         if (ucmd->OpCode == GDT_IOCTL) {
1028             gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_IOCTL_SZ,
1029                                       sizeof(u_int32_t));
1030             cnt = ucmd->u.ioctl.param_size;
1031             if (cnt > GDT_SCRATCH_SZ) {
1032                 kprintf("iir%d: Scratch buffer too small (%d/%d)\n",
1033                        gdt->sc_hanum, GDT_SCRATCH_SZ, cnt);
1034                 gdt_free_ccb(gdt, gccb);
1035                 return (NULL);
1036             }
1037         } else {
1038             gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST +
1039                                       GDT_SG_SZ, sizeof(u_int32_t));
1040             cnt = ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE;
1041             if (cnt > GDT_SCRATCH_SZ) {
1042                 kprintf("iir%d: Scratch buffer too small (%d/%d)\n",
1043                        gdt->sc_hanum, GDT_SCRATCH_SZ, cnt);
1044                 gdt_free_ccb(gdt, gccb);
1045                 return (NULL);
1046             }
1047         }
1048     } else {
1049         gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_RAW_SG_LST +
1050                                   GDT_SG_SZ, sizeof(u_int32_t));
1051         cnt = ucmd->u.raw.sdlen;
1052         if (cnt + ucmd->u.raw.sense_len > GDT_SCRATCH_SZ) {
1053             kprintf("iir%d: Scratch buffer too small (%d/%d)\n",
1054                    gdt->sc_hanum, GDT_SCRATCH_SZ, cnt + ucmd->u.raw.sense_len);
1055             gdt_free_ccb(gdt, gccb);
1056             return (NULL);
1057         }
1058     }
1059     if (cnt != 0)
1060         bcopy(ucmd->data, gccb->gc_scratch, cnt);
1061 
1062     if (gdt->sc_cmd_off + gdt->sc_cmd_len + GDT_DPMEM_COMMAND_OFFSET >
1063         gdt->sc_ic_all_size) {
1064         GDT_DPRINTF(GDT_D_INVALID, ("iir%d: gdt_ioctl_cmd(): DPMEM overflow\n",
1065                                     gdt->sc_hanum));
1066         gdt_free_ccb(gdt, gccb);
1067         return (NULL);
1068     }
1069 
1070     if (gdt->sc_cmd_cnt == 0)
1071         gdt->sc_set_sema0(gdt);
1072     crit_exit();
1073 
1074     /* fill cmd structure */
1075     gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
1076               gccb->gc_cmd_index);
1077     gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE,
1078               ucmd->OpCode);
1079 
1080     if (ucmd->service == GDT_CACHESERVICE) {
1081         if (ucmd->OpCode == GDT_IOCTL) {
1082             /* IOCTL */
1083             gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_PARAM_SIZE,
1084                       ucmd->u.ioctl.param_size);
1085             gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_SUBFUNC,
1086                       ucmd->u.ioctl.subfunc);
1087             gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_CHANNEL,
1088                       ucmd->u.ioctl.channel);
1089             gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_IOCTL_P_PARAM,
1090                       gdt_ccb_vtop(gdt, gccb) +
1091                       offsetof(struct gdt_ccb, gc_scratch[0]));
1092         } else {
1093             /* cache service command */
1094             gdt_enc16(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
1095                       ucmd->u.cache.DeviceNo);
1096             gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
1097                       ucmd->u.cache.BlockNo);
1098             gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
1099                       ucmd->u.cache.BlockCnt);
1100             gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
1101                       0xffffffffUL);
1102             gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ,
1103                       1);
1104             gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
1105                       GDT_SG_PTR, gdt_ccb_vtop(gdt, gccb) +
1106                       offsetof(struct gdt_ccb, gc_scratch[0]));
1107             gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
1108                       GDT_SG_LEN, ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE);
1109         }
1110     } else {
1111         /* raw service command */
1112         gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_DIRECTION,
1113                   ucmd->u.raw.direction);
1114         gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDATA,
1115                   0xffffffffUL);
1116         gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDLEN,
1117                   ucmd->u.raw.sdlen);
1118         gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CLEN,
1119                   ucmd->u.raw.clen);
1120         bcopy(ucmd->u.raw.cmd, gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_CMD,
1121               12);
1122         gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
1123             ucmd->u.raw.target;
1124         gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
1125             ucmd->u.raw.lun;
1126         gdt->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
1127             ucmd->u.raw.bus;
1128         gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_LEN,
1129                   ucmd->u.raw.sense_len);
1130         gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SENSE_DATA,
1131                   gdt_ccb_vtop(gdt, gccb) +
1132                   offsetof(struct gdt_ccb, gc_scratch[ucmd->u.raw.sdlen]));
1133         gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_RANZ,
1134                   1);
1135         gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
1136                   GDT_SG_PTR, gdt_ccb_vtop(gdt, gccb) +
1137                   offsetof(struct gdt_ccb, gc_scratch[0]));
1138         gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
1139                   GDT_SG_LEN, ucmd->u.raw.sdlen);
1140     }
1141 
1142     crit_enter();
1143     gdt_stat.sg_count_act = 1;
1144     gdt->sc_copy_cmd(gdt, gccb);
1145     return (gccb);
1146 }
1147 
1148 static void
1149 gdt_internal_cache_cmd(struct gdt_softc *gdt,union ccb *ccb)
1150 {
1151     int t;
1152 
1153     t = ccb->ccb_h.target_id;
1154     GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cache_cmd(%p, %p, 0x%x, %d)\n",
1155         gdt, ccb, ccb->csio.cdb_io.cdb_bytes[0], t));
1156 
1157     switch (ccb->csio.cdb_io.cdb_bytes[0]) {
1158       case TEST_UNIT_READY:
1159       case START_STOP:
1160         break;
1161       case REQUEST_SENSE:
1162         GDT_DPRINTF(GDT_D_MISC, ("REQUEST_SENSE\n"));
1163         break;
1164       case INQUIRY:
1165         {
1166             struct scsi_inquiry_data *inq;
1167 
1168             inq = (struct scsi_inquiry_data *)ccb->csio.data_ptr;
1169             bzero(inq, sizeof(struct scsi_inquiry_data));
1170             inq->device = (gdt->sc_hdr[t].hd_devtype & 4) ?
1171                 T_CDROM : T_DIRECT;
1172             inq->dev_qual2 = (gdt->sc_hdr[t].hd_devtype & 1) ? 0x80 : 0;
1173             inq->version = SCSI_REV_2;
1174             inq->response_format = 2;
1175             inq->additional_length = 32;
1176             inq->flags = SID_CmdQue | SID_Sync;
1177             strcpy(inq->vendor, "IIR     ");
1178             ksprintf(inq->product, "Host Drive   #%02d", t);
1179             strcpy(inq->revision, "   ");
1180             break;
1181         }
1182       case MODE_SENSE_6:
1183         {
1184             struct mpd_data {
1185                 struct scsi_mode_hdr_6 hd;
1186                 struct scsi_mode_block_descr bd;
1187                 struct scsi_control_page cp;
1188             } *mpd;
1189             u_int8_t page;
1190 
1191             mpd = (struct mpd_data *)ccb->csio.data_ptr;
1192             bzero(mpd, sizeof(struct mpd_data));
1193             mpd->hd.datalen = sizeof(struct scsi_mode_hdr_6) +
1194                 sizeof(struct scsi_mode_block_descr);
1195             mpd->hd.dev_specific = (gdt->sc_hdr[t].hd_devtype & 2) ? 0x80 : 0;
1196             mpd->hd.block_descr_len = sizeof(struct scsi_mode_block_descr);
1197             mpd->bd.block_len[0] = (GDT_SECTOR_SIZE & 0x00ff0000) >> 16;
1198             mpd->bd.block_len[1] = (GDT_SECTOR_SIZE & 0x0000ff00) >> 8;
1199             mpd->bd.block_len[2] = (GDT_SECTOR_SIZE & 0x000000ff);
1200             page=((struct scsi_mode_sense_6 *)ccb->csio.cdb_io.cdb_bytes)->page;
1201             switch (page) {
1202               default:
1203                 GDT_DPRINTF(GDT_D_MISC, ("MODE_SENSE_6: page 0x%x\n", page));
1204                 break;
1205             }
1206             break;
1207         }
1208       case READ_CAPACITY:
1209         {
1210             struct scsi_read_capacity_data *rcd;
1211 
1212             rcd = (struct scsi_read_capacity_data *)ccb->csio.data_ptr;
1213             bzero(rcd, sizeof(struct scsi_read_capacity_data));
1214             scsi_ulto4b(gdt->sc_hdr[t].hd_size - 1, rcd->addr);
1215             scsi_ulto4b(GDT_SECTOR_SIZE, rcd->length);
1216             break;
1217         }
1218       default:
1219         GDT_DPRINTF(GDT_D_MISC, ("gdt_internal_cache_cmd(%d) unknown\n",
1220                                     ccb->csio.cdb_io.cdb_bytes[0]));
1221         break;
1222     }
1223     ccb->ccb_h.status = CAM_REQ_CMP;
1224     --gdt_stat.io_count_act;
1225     xpt_done(ccb);
1226 }
1227 
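/*
 * bus_dmamap_load() callback for the gccb array: the allocation is loaded
 * as a single segment, so simply record its bus address in the caller's
 * bus_addr_t (iir_init() passes &gdt->sc_gccb_busbase).  nseg and error are
 * not examined here.
 */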
1228 static void
1229 gdtmapmem(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1230 {
1231     bus_addr_t *busaddrp;
1232 
1233     busaddrp = (bus_addr_t *)arg;
1234     *busaddrp = dm_segs->ds_addr;
1235 }
1236 
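/*
 * bus_dmamap_load() callback for SCSI I/O: copy the DMA segments into the
 * cache- or raw-service scatter/gather list of the command being built,
 * adjust sc_cmd_len accordingly, pre-sync the buffer map, mark the CCB as
 * queued, arm the per-CCB timeout and finally copy the command to the
 * controller.
 */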
1237 static void
1238 gdtexecuteccb(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1239 {
1240     struct gdt_ccb *gccb;
1241     union ccb *ccb;
1242     struct gdt_softc *gdt;
1243     int i;
1244 
1245     crit_enter();
1246 
1247     gccb = (struct gdt_ccb *)arg;
1248     ccb = gccb->gc_ccb;
1249     gdt = cam_sim_softc((struct cam_sim *)ccb->ccb_h.ccb_sim_ptr);
1250 
1251     GDT_DPRINTF(GDT_D_CMD, ("gdtexecuteccb(%p, %p, %p, %d, %d)\n",
1252                             gdt, gccb, dm_segs, nseg, error));
1253     gdt_stat.sg_count_act = nseg;
1254     if (nseg > gdt_stat.sg_count_max)
1255         gdt_stat.sg_count_max = nseg;
1256 
1257     /* Copy the segments into our SG list */
1258     if (gccb->gc_service == GDT_CACHESERVICE) {
1259         for (i = 0; i < nseg; ++i) {
1260             gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
1261                       i * GDT_SG_SZ + GDT_SG_PTR, dm_segs->ds_addr);
1262             gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
1263                       i * GDT_SG_SZ + GDT_SG_LEN, dm_segs->ds_len);
1264             dm_segs++;
1265         }
1266         gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ,
1267                   nseg);
1268         gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
1269                   0xffffffffUL);
1270 
1271         gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST +
1272                                   nseg * GDT_SG_SZ, sizeof(u_int32_t));
1273     } else {
1274         for (i = 0; i < nseg; ++i) {
1275             gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
1276                       i * GDT_SG_SZ + GDT_SG_PTR, dm_segs->ds_addr);
1277             gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_LST +
1278                       i * GDT_SG_SZ + GDT_SG_LEN, dm_segs->ds_len);
1279             dm_segs++;
1280         }
1281         gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SG_RANZ,
1282                   nseg);
1283         gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_RAW_SDATA,
1284                   0xffffffffUL);
1285 
1286         gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_RAW_SG_LST +
1287                                   nseg * GDT_SG_SZ, sizeof(u_int32_t));
1288     }
1289 
1290     if (nseg != 0) {
1291         bus_dmasync_op_t op;
1292 
1293         if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1294             op = BUS_DMASYNC_PREREAD;
1295         else
1296             op = BUS_DMASYNC_PREWRITE;
1297         bus_dmamap_sync(gdt->sc_buffer_dmat, gccb->gc_dmamap, op);
1298     }
1299 
1300     /* We must NOT abort the command here, even if CAM_REQ_INPROG is not set,
1301      * because the command semaphore is already set!
1302      */
1303 
1304     ccb->ccb_h.status |= CAM_SIM_QUEUED;
1305     /* timeout handling */
1306     callout_reset(&ccb->ccb_h.timeout_ch, (ccb->ccb_h.timeout * hz) / 1000,
1307         iir_timeout, gccb);
1308 
1309     gdt->sc_copy_cmd(gdt, gccb);
1310     crit_exit();
1311 }
1312 
1313 
1314 static void
1315 iir_action( struct cam_sim *sim, union ccb *ccb )
1316 {
1317     struct gdt_softc *gdt;
1318     int bus, target, lun;
1319 
1320     gdt = (struct gdt_softc *)cam_sim_softc( sim );
1321     ccb->ccb_h.ccb_sim_ptr = sim;
1322     bus = cam_sim_bus(sim);
1323     target = ccb->ccb_h.target_id;
1324     lun = ccb->ccb_h.target_lun;
1325     GDT_DPRINTF(GDT_D_CMD,
1326                 ("iir_action(%p) func 0x%x cmd 0x%x bus %d target %d lun %d\n",
1327                  gdt, ccb->ccb_h.func_code, ccb->csio.cdb_io.cdb_bytes[0],
1328                  bus, target, lun));
1329     ++gdt_stat.io_count_act;
1330     if (gdt_stat.io_count_act > gdt_stat.io_count_max)
1331         gdt_stat.io_count_max = gdt_stat.io_count_act;
1332 
1333     switch (ccb->ccb_h.func_code) {
1334       case XPT_SCSI_IO:
1335 	crit_enter();
1336         TAILQ_INSERT_TAIL(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
1337         ++gdt_stat.req_queue_act;
1338         if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
1339             gdt_stat.req_queue_max = gdt_stat.req_queue_act;
1340 	crit_exit();
1341         gdt_next(gdt);
1342         break;
1343       case XPT_RESET_DEV:   /* Bus Device Reset the specified SCSI device */
1344       case XPT_ABORT:                       /* Abort the specified CCB */
1345         /* XXX Implement */
1346         ccb->ccb_h.status = CAM_REQ_INVALID;
1347         --gdt_stat.io_count_act;
1348         xpt_done(ccb);
1349         break;
1350       case XPT_SET_TRAN_SETTINGS:
1351         ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1352         --gdt_stat.io_count_act;
1353         xpt_done(ccb);
1354         break;
1355       case XPT_GET_TRAN_SETTINGS:
1356         /* Get default/user set transfer settings for the target */
1357           {
1358               struct        ccb_trans_settings *cts = &ccb->cts;
1359               struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
1360               struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
1361 
1362               cts->protocol = PROTO_SCSI;
1363               cts->protocol_version = SCSI_REV_2;
1364               cts->transport = XPORT_SPI;
1365               cts->transport_version = 2;
1366 
1367               if (cts->type == CTS_TYPE_USER_SETTINGS) {
1368 		  spi->flags = CTS_SPI_FLAGS_DISC_ENB;
1369                   scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
1370                   spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
1371                   spi->sync_period = 25; /* 10MHz */
1372                   if (spi->sync_period != 0)
1373                       spi->sync_offset = 15;
1374 
1375                   spi->valid = CTS_SPI_VALID_SYNC_RATE
1376                       | CTS_SPI_VALID_SYNC_OFFSET
1377                       | CTS_SPI_VALID_BUS_WIDTH
1378                       | CTS_SPI_VALID_DISC;
1379                   scsi->valid = CTS_SCSI_VALID_TQ;
1380                   ccb->ccb_h.status = CAM_REQ_CMP;
1381               } else {
1382                   ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1383               }
1384               --gdt_stat.io_count_act;
1385               xpt_done(ccb);
1386               break;
1387           }
1388       case XPT_CALC_GEOMETRY:
1389           {
1390               struct ccb_calc_geometry *ccg;
1391               u_int32_t secs_per_cylinder;
1392 
1393               ccg = &ccb->ccg;
1394               ccg->heads = gdt->sc_hdr[target].hd_heads;
1395               ccg->secs_per_track = gdt->sc_hdr[target].hd_secs;
1396               secs_per_cylinder = ccg->heads * ccg->secs_per_track;
1397               ccg->cylinders = ccg->volume_size / secs_per_cylinder;
1398               ccb->ccb_h.status = CAM_REQ_CMP;
1399               --gdt_stat.io_count_act;
1400               xpt_done(ccb);
1401               break;
1402           }
1403       case XPT_RESET_BUS:           /* Reset the specified SCSI bus */
1404           {
1405               /* XXX Implement */
1406               ccb->ccb_h.status = CAM_REQ_CMP;
1407               --gdt_stat.io_count_act;
1408               xpt_done(ccb);
1409               break;
1410           }
1411       case XPT_TERM_IO:             /* Terminate the I/O process */
1412         /* XXX Implement */
1413         ccb->ccb_h.status = CAM_REQ_INVALID;
1414         --gdt_stat.io_count_act;
1415         xpt_done(ccb);
1416         break;
1417       case XPT_PATH_INQ:            /* Path routing inquiry */
1418           {
1419               struct ccb_pathinq *cpi = &ccb->cpi;
1420 
1421               cpi->version_num = 1;
1422               cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE;
1423               cpi->hba_inquiry |= PI_WIDE_16;
1424               cpi->target_sprt = 1;
1425               cpi->hba_misc = 0;
1426               cpi->hba_eng_cnt = 0;
1427               if (bus == gdt->sc_virt_bus)
1428                   cpi->max_target = GDT_MAX_HDRIVES - 1;
1429               else if (gdt->sc_class & GDT_FC)
1430                   cpi->max_target = GDT_MAXID_FC - 1;
1431               else
1432                   cpi->max_target = GDT_MAXID - 1;
1433               cpi->max_lun = 7;
1434               cpi->unit_number = cam_sim_unit(sim);
1435               cpi->bus_id = bus;
1436               cpi->initiator_id =
1437                   (bus == gdt->sc_virt_bus ? 127 : gdt->sc_bus_id[bus]);
1438               cpi->base_transfer_speed = 3300;
1439               strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1440               strncpy(cpi->hba_vid, "Intel Corp.", HBA_IDLEN);
1441               strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1442               cpi->transport = XPORT_SPI;
1443               cpi->transport_version = 2;
1444               cpi->protocol = PROTO_SCSI;
1445               cpi->protocol_version = SCSI_REV_2;
1446               cpi->ccb_h.status = CAM_REQ_CMP;
1447               --gdt_stat.io_count_act;
1448               xpt_done(ccb);
1449               break;
1450           }
1451       default:
1452         GDT_DPRINTF(GDT_D_INVALID, ("gdt_next(%p) cmd 0x%x invalid\n",
1453                                     gdt, ccb->ccb_h.func_code));
1454         ccb->ccb_h.status = CAM_REQ_INVALID;
1455         --gdt_stat.io_count_act;
1456         xpt_done(ccb);
1457         break;
1458     }
1459 }
1460 
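     /*
      * CAM polling entry point: poll the controller for completed
      * commands by running the interrupt handler directly.
      */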
1461 static void
1462 iir_poll(struct cam_sim *sim)
1463 {
1464     struct gdt_softc *gdt;
1465 
1466     gdt = (struct gdt_softc *)cam_sim_softc(sim);
1467     GDT_DPRINTF(GDT_D_CMD, ("iir_poll sim %p gdt %p\n", sim, gdt));
1468     iir_intr(gdt);
1469 }
1470 
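     /*
      * Command timeout handler: at present this only logs a debug
      * message; no abort or recovery is attempted here.
      */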
1471 static void
1472 iir_timeout(void *arg)
1473 {
1474     GDT_DPRINTF(GDT_D_TIMEOUT, ("iir_timeout(%p)\n", arg));
1475 }
1476 
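     /*
      * Periodic watchdog: count the queued CCBs, queued user commands,
      * and free/pending driver CCBs for debugging, then rearm the
      * callout to fire again in 15 seconds.
      */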
1477 static void
1478 iir_watchdog(void *arg)
1479 {
1480     struct gdt_softc *gdt;
1481 
1482     gdt = (struct gdt_softc *)arg;
1483     GDT_DPRINTF(GDT_D_DEBUG, ("iir_watchdog(%p)\n", gdt));
1484 
1485     {
1486         int ccbs = 0, ucmds = 0, frees = 0, pends = 0;
1487         struct gdt_ccb *p;
1488         struct ccb_hdr *h;
1489         struct gdt_ucmd *u;
1490 
1491         for (h = TAILQ_FIRST(&gdt->sc_ccb_queue); h != NULL;
1492              h = TAILQ_NEXT(h, sim_links.tqe))
1493             ccbs++;
1494         for (u = TAILQ_FIRST(&gdt->sc_ucmd_queue); u != NULL;
1495              u = TAILQ_NEXT(u, links))
1496             ucmds++;
1497         for (p = SLIST_FIRST(&gdt->sc_free_gccb); p != NULL;
1498              p = SLIST_NEXT(p, sle))
1499             frees++;
1500         for (p = SLIST_FIRST(&gdt->sc_pending_gccb); p != NULL;
1501              p = SLIST_NEXT(p, sle))
1502             pends++;
1503 
1504         GDT_DPRINTF(GDT_D_TIMEOUT, ("ccbs %d ucmds %d frees %d pends %d\n",
1505                                     ccbs, ucmds, frees, pends));
1506     }
1507 
1508     callout_reset(&gdt->watchdog_timer, hz * 15, iir_watchdog, gdt);
1509 }
1510 
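     /*
      * Shutdown eventhandler: mark the controller as shutting down,
      * wait for outstanding commands to drain (bounded by a timeout),
      * then issue a cache flush for every host drive that is present.
      */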
1511 static void
1512 iir_shutdown(void *arg, int howto)
1513 {
1514     struct gdt_softc *gdt;
1515     struct gdt_ccb *gccb;
1516     gdt_ucmd_t *ucmd;
1517     int i;
1518 
1519     gdt = (struct gdt_softc *)arg;
1520     GDT_DPRINTF(GDT_D_CMD, ("iir_shutdown(%p, %d)\n", gdt, howto));
1521 
1522     kprintf("iir%d: Flushing all Host Drives. Please wait ...  ",
1523            gdt->sc_hanum);
1524 
1525     /* allocate ucmd buffer */
1526     ucmd = kmalloc(sizeof(gdt_ucmd_t), M_DEVBUF, M_INTWAIT | M_ZERO);
1527 
1528     /* wait for pending IOs */
1529     crit_enter();
1530     gdt->sc_state = GDT_SHUTDOWN;
1531     crit_exit();
1532     if ((gccb = SLIST_FIRST(&gdt->sc_pending_gccb)) != NULL)
1533         (void) tsleep((void *)gccb, PCATCH, "iirshw", 100 * hz);
1534 
1535     /* flush */
1536     for (i = 0; i < GDT_MAX_HDRIVES; ++i) {
1537         if (gdt->sc_hdr[i].hd_present) {
1538             ucmd->service = GDT_CACHESERVICE;
1539             ucmd->OpCode = GDT_FLUSH;
1540             ucmd->u.cache.DeviceNo = i;
1541             crit_enter();
1542             TAILQ_INSERT_TAIL(&gdt->sc_ucmd_queue, ucmd, links);
1543             ucmd->complete_flag = FALSE;
1544             crit_exit();
1545             gdt_next(gdt);
1546             if (!ucmd->complete_flag)
1547                 (void) tsleep((void *)ucmd, PCATCH, "iirshw", 10*hz);
1548         }
1549     }
1550 
1551     kfree(ucmd, M_DEVBUF);
1552     kprintf("Done.\n");
1553 }
1554 
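     /*
      * Interrupt handler: fetch the controller status, hand off
      * asynchronous and service events, and otherwise complete the
      * driver CCB selected by the returned index (internal, ioctl
      * or CAM request, according to its flags).
      */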
1555 void
1556 iir_intr(void *arg)
1557 {
1558     struct gdt_softc *gdt = arg;
1559     struct gdt_intr_ctx ctx;
1560     struct gdt_ccb *gccb;
1561     gdt_ucmd_t *ucmd;
1562     u_int32_t cnt;
1563 
1564     GDT_DPRINTF(GDT_D_INTR, ("gdt_intr(%p)\n", gdt));
1565 
1566     /* If polling and we were not called from gdt_wait, just return */
1567     if ((gdt->sc_state & GDT_POLLING) &&
1568         !(gdt->sc_state & GDT_POLL_WAIT))
1569         return;
1570 
1571     if (!(gdt->sc_state & GDT_POLLING))
1572         crit_enter();
1573     gdt_wait_index = 0;
1574 
1575     ctx.istatus = gdt->sc_get_status(gdt);
1576     if (!ctx.istatus) {
1577         if (!(gdt->sc_state & GDT_POLLING))
1578             crit_exit();
1579         gdt->sc_status = GDT_S_NO_STATUS;
1580         return;
1581     }
1582 
1583     gdt->sc_intr(gdt, &ctx);
1584 
1585     gdt->sc_status = ctx.cmd_status;
1586     gdt->sc_service = ctx.service;
1587     gdt->sc_info = ctx.info;
1588     gdt->sc_info2 = ctx.info2;
1589 
1590     if (gdt->sc_state & GDT_POLL_WAIT) {
1591         gdt_wait_gdt = gdt;
1592         gdt_wait_index = ctx.istatus;
1593     }
1594 
1595     if (ctx.istatus == GDT_ASYNCINDEX) {
1596         gdt_async_event(gdt, ctx.service);
1597         if (!(gdt->sc_state & GDT_POLLING))
1598             crit_exit();
1599         return;
1600     }
1601     if (ctx.istatus == GDT_SPEZINDEX) {
1602         GDT_DPRINTF(GDT_D_INVALID,
1603                     ("iir%d: Service unknown or not initialized!\n",
1604                      gdt->sc_hanum));
1605         gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.driver);
1606         gdt->sc_dvr.eu.driver.ionode = gdt->sc_hanum;
1607         gdt_store_event(GDT_ES_DRIVER, 4, &gdt->sc_dvr);
1608         if (!(gdt->sc_state & GDT_POLLING))
1609             crit_exit();
1610         return;
1611     }
1612 
1613     gccb = &gdt->sc_gccbs[ctx.istatus - 2];
1614     ctx.service = gccb->gc_service;
1615 
1616     switch (gccb->gc_flags) {
1617       case GDT_GCF_UNUSED:
1618         GDT_DPRINTF(GDT_D_INVALID, ("iir%d: Index (%d) to unused command!\n",
1619                     gdt->sc_hanum, ctx.istatus));
1620         gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.driver);
1621         gdt->sc_dvr.eu.driver.ionode = gdt->sc_hanum;
1622         gdt->sc_dvr.eu.driver.index = ctx.istatus;
1623         gdt_store_event(GDT_ES_DRIVER, 1, &gdt->sc_dvr);
1624         gdt_free_ccb(gdt, gccb);
1625         /* fallthrough */
1626 
1627       case GDT_GCF_INTERNAL:
1628         if (!(gdt->sc_state & GDT_POLLING))
1629             crit_exit();
1630         break;
1631 
1632       case GDT_GCF_IOCTL:
1633         ucmd = gccb->gc_ucmd;
1634         if (gdt->sc_status == GDT_S_BSY) {
1635             GDT_DPRINTF(GDT_D_DEBUG, ("iir_intr(%p) ioctl: gccb %p busy\n",
1636                                       gdt, gccb));
1637             TAILQ_INSERT_HEAD(&gdt->sc_ucmd_queue, ucmd, links);
1638             if (!(gdt->sc_state & GDT_POLLING))
1639                 crit_exit();
1640         } else {
1641             ucmd->status = gdt->sc_status;
1642             ucmd->info = gdt->sc_info;
1643             ucmd->complete_flag = TRUE;
1644             if (ucmd->service == GDT_CACHESERVICE) {
1645                 if (ucmd->OpCode == GDT_IOCTL) {
1646                     cnt = ucmd->u.ioctl.param_size;
1647                     if (cnt != 0)
1648                         bcopy(gccb->gc_scratch, ucmd->data, cnt);
1649                 } else {
1650                     cnt = ucmd->u.cache.BlockCnt * GDT_SECTOR_SIZE;
1651                     if (cnt != 0)
1652                         bcopy(gccb->gc_scratch, ucmd->data, cnt);
1653                 }
1654             } else {
1655                 cnt = ucmd->u.raw.sdlen;
1656                 if (cnt != 0)
1657                     bcopy(gccb->gc_scratch, ucmd->data, cnt);
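                     /* XXX the sense copy below reuses the same source
                      * offset and length as the data copy above. */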
1658                 if (ucmd->u.raw.sense_len != 0)
1659                     bcopy(gccb->gc_scratch, ucmd->data, cnt);
1660             }
1661             gdt_free_ccb(gdt, gccb);
1662             if (!(gdt->sc_state & GDT_POLLING))
1663                 crit_exit();
1664             /* wakeup */
1665             wakeup(ucmd);
1666         }
1667         gdt_next(gdt);
1668         break;
1669 
1670       default:
1671         gdt_free_ccb(gdt, gccb);
1672         gdt_sync_event(gdt, ctx.service, ctx.istatus, gccb);
1673         if (!(gdt->sc_state & GDT_POLLING))
1674             crit_exit();
1675         gdt_next(gdt);
1676         break;
1677     }
1678 }
1679 
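     /*
      * Handle an asynchronous controller event.  For the screen
      * service this posts a READ message command to fetch the
      * controller's message text; all other services are recorded
      * in the driver event buffer and logged.
      */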
1680 int
1681 gdt_async_event(struct gdt_softc *gdt, int service)
1682 {
1683     struct gdt_ccb *gccb;
1684 
1685     GDT_DPRINTF(GDT_D_INTR, ("gdt_async_event(%p, %d)\n", gdt, service));
1686 
1687     if (service == GDT_SCREENSERVICE) {
1688         if (gdt->sc_status == GDT_MSG_REQUEST) {
1689             while (gdt->sc_test_busy(gdt))
1690                 DELAY(1);
1691             bzero(gdt->sc_cmd, GDT_CMD_SZ);
1692             gccb = gdt_get_ccb(gdt);
1693             if (gccb == NULL) {
1694                 kprintf("iir%d: No free command index found\n",
1695                        gdt->sc_hanum);
1696                 return (1);
1697             }
1698             gccb->gc_service = service;
1699             gccb->gc_flags = GDT_GCF_SCREEN;
1700             gdt->sc_set_sema0(gdt);
1701             gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
1702                       gccb->gc_cmd_index);
1703             gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_READ);
1704             gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
1705                       GDT_MSG_INV_HANDLE);
1706             gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
1707                       gdt_ccb_vtop(gdt, gccb) +
1708                       offsetof(struct gdt_ccb, gc_scratch[0]));
1709             gdt->sc_cmd_off = 0;
1710             gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
1711                                       sizeof(u_int32_t));
1712             gdt->sc_cmd_cnt = 0;
1713             gdt->sc_copy_cmd(gdt, gccb);
1714             kprintf("iir%d: [PCI %d/%d] ",
1715                 gdt->sc_hanum, gdt->sc_bus, gdt->sc_slot);
1716             gdt->sc_release_event(gdt);
1717         }
1718 
1719     } else {
1720         if ((gdt->sc_fw_vers & 0xff) >= 0x1a) {
1721             gdt->sc_dvr.size = 0;
1722             gdt->sc_dvr.eu.async.ionode = gdt->sc_hanum;
1723             gdt->sc_dvr.eu.async.status  = gdt->sc_status;
1724             /* severity and event_string already set! */
1725         } else {
1726             gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.async);
1727             gdt->sc_dvr.eu.async.ionode   = gdt->sc_hanum;
1728             gdt->sc_dvr.eu.async.service = service;
1729             gdt->sc_dvr.eu.async.status  = gdt->sc_status;
1730             gdt->sc_dvr.eu.async.info    = gdt->sc_info;
1731             *(u_int32_t *)gdt->sc_dvr.eu.async.scsi_coord  = gdt->sc_info2;
1732         }
1733         gdt_store_event(GDT_ES_ASYNC, service, &gdt->sc_dvr);
1734         kprintf("iir%d: %s\n", gdt->sc_hanum, gdt->sc_dvr.event_string);
1735     }
1736 
1737     return (0);
1738 }
1739 
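     /*
      * Complete a synchronous (CCB-bound) command.  Screen service
      * message traffic is answered inline; for everything else the
      * associated CAM CCB is finished, mapping the controller status
      * to CAM/SCSI status and recording cache service errors.
      */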
1740 int
1741 gdt_sync_event(struct gdt_softc *gdt, int service,
1742                u_int8_t index, struct gdt_ccb *gccb)
1743 {
1744     union ccb *ccb;
1745     bus_dmasync_op_t op;
1746 
1747     GDT_DPRINTF(GDT_D_INTR,
1748                 ("gdt_sync_event(%p, %d, %d, %p)\n", gdt, service, index, gccb));
1749 
1750     ccb = gccb->gc_ccb;
1751 
1752     if (service == GDT_SCREENSERVICE) {
1753         u_int32_t msg_len;
1754 
1755         msg_len = gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_LEN);
1756         if (msg_len)
1757             if (!(gccb->gc_scratch[GDT_SCR_MSG_ANSWER] &&
1758                   gccb->gc_scratch[GDT_SCR_MSG_EXT])) {
1759                 gccb->gc_scratch[GDT_SCR_MSG_TEXT + msg_len] = '\0';
1760                 kprintf("%s",&gccb->gc_scratch[GDT_SCR_MSG_TEXT]);
1761             }
1762 
1763         if (gccb->gc_scratch[GDT_SCR_MSG_EXT] &&
1764             !gccb->gc_scratch[GDT_SCR_MSG_ANSWER]) {
1765             while (gdt->sc_test_busy(gdt))
1766                 DELAY(1);
1767             bzero(gdt->sc_cmd, GDT_CMD_SZ);
1768             gccb = gdt_get_ccb(gdt);
1769             if (gccb == NULL) {
1770                 kprintf("iir%d: No free command index found\n",
1771                        gdt->sc_hanum);
1772                 return (1);
1773             }
1774             gccb->gc_service = service;
1775             gccb->gc_flags = GDT_GCF_SCREEN;
1776             gdt->sc_set_sema0(gdt);
1777             gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
1778                       gccb->gc_cmd_index);
1779             gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_READ);
1780             gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
1781                       gccb->gc_scratch[GDT_SCR_MSG_HANDLE]);
1782             gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
1783                       gdt_ccb_vtop(gdt, gccb) +
1784                       offsetof(struct gdt_ccb, gc_scratch[0]));
1785             gdt->sc_cmd_off = 0;
1786             gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
1787                                       sizeof(u_int32_t));
1788             gdt->sc_cmd_cnt = 0;
1789             gdt->sc_copy_cmd(gdt, gccb);
1790             gdt->sc_release_event(gdt);
1791             return (0);
1792         }
1793 
1794         if (gccb->gc_scratch[GDT_SCR_MSG_ANSWER] &&
1795             gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN)) {
1796             /* default answers (getchar() not possible) */
1797             if (gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN) == 1) {
1798                 gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_ALEN, 0);
1799                 gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_LEN, 1);
1800                 gccb->gc_scratch[GDT_SCR_MSG_TEXT] = 0;
1801             } else {
1802                 gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_ALEN,
1803                           gdt_dec32(gccb->gc_scratch + GDT_SCR_MSG_ALEN) - 2);
1804                 gdt_enc32(gccb->gc_scratch + GDT_SCR_MSG_LEN, 2);
1805                 gccb->gc_scratch[GDT_SCR_MSG_TEXT] = 1;
1806                 gccb->gc_scratch[GDT_SCR_MSG_TEXT + 1] = 0;
1807             }
1808             gccb->gc_scratch[GDT_SCR_MSG_EXT] = 0;
1809             gccb->gc_scratch[GDT_SCR_MSG_ANSWER] = 0;
1810             while (gdt->sc_test_busy(gdt))
1811                 DELAY(1);
1812             bzero(gdt->sc_cmd, GDT_CMD_SZ);
1813             gccb = gdt_get_ccb(gdt);
1814             if (gccb == NULL) {
1815                 kprintf("iir%d: No free command index found\n",
1816                        gdt->sc_hanum);
1817                 return (1);
1818             }
1819             gccb->gc_service = service;
1820             gccb->gc_flags = GDT_GCF_SCREEN;
1821             gdt->sc_set_sema0(gdt);
1822             gdt_enc32(gdt->sc_cmd + GDT_CMD_COMMANDINDEX,
1823                       gccb->gc_cmd_index);
1824             gdt_enc16(gdt->sc_cmd + GDT_CMD_OPCODE, GDT_WRITE);
1825             gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_HANDLE,
1826                       gccb->gc_scratch[GDT_SCR_MSG_HANDLE]);
1827             gdt_enc32(gdt->sc_cmd + GDT_CMD_UNION + GDT_SCREEN_MSG_ADDR,
1828                       gdt_ccb_vtop(gdt, gccb) +
1829                       offsetof(struct gdt_ccb, gc_scratch[0]));
1830             gdt->sc_cmd_off = 0;
1831             gdt->sc_cmd_len = roundup(GDT_CMD_UNION + GDT_SCREEN_SZ,
1832                                       sizeof(u_int32_t));
1833             gdt->sc_cmd_cnt = 0;
1834             gdt->sc_copy_cmd(gdt, gccb);
1835             gdt->sc_release_event(gdt);
1836             return (0);
1837         }
1838         kprintf("\n");
1839         return (0);
1840     } else {
1841         callout_stop(&ccb->ccb_h.timeout_ch);
1842         if (gdt->sc_status == GDT_S_BSY) {
1843             GDT_DPRINTF(GDT_D_DEBUG, ("gdt_sync_event(%p) gccb %p busy\n",
1844                                       gdt, gccb));
1845             TAILQ_INSERT_HEAD(&gdt->sc_ccb_queue, &ccb->ccb_h, sim_links.tqe);
1846             ++gdt_stat.req_queue_act;
1847             if (gdt_stat.req_queue_act > gdt_stat.req_queue_max)
1848                 gdt_stat.req_queue_max = gdt_stat.req_queue_act;
1849             return (2);
1850         }
1851 
1852         if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
1853             op = BUS_DMASYNC_POSTREAD;
1854         else
1855             op = BUS_DMASYNC_POSTWRITE;
1856         bus_dmamap_sync(gdt->sc_buffer_dmat, gccb->gc_dmamap, op);
1857 
1858         ccb->csio.resid = 0;
1859         if (gdt->sc_status == GDT_S_OK) {
1860             ccb->ccb_h.status = CAM_REQ_CMP;
1861         } else {
1862             /* error */
1863             if (gccb->gc_service == GDT_CACHESERVICE) {
1864                 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
1865                 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
1866                 bzero(&ccb->csio.sense_data, ccb->csio.sense_len);
1867                 ccb->csio.sense_data.error_code =
1868                     SSD_CURRENT_ERROR | SSD_ERRCODE_VALID;
1869                 ccb->csio.sense_data.flags = SSD_KEY_NOT_READY;
1870 
1871                 gdt->sc_dvr.size = sizeof(gdt->sc_dvr.eu.sync);
1872                 gdt->sc_dvr.eu.sync.ionode  = gdt->sc_hanum;
1873                 gdt->sc_dvr.eu.sync.service = service;
1874                 gdt->sc_dvr.eu.sync.status  = gdt->sc_status;
1875                 gdt->sc_dvr.eu.sync.info    = gdt->sc_info;
1876                 gdt->sc_dvr.eu.sync.hostdrive = ccb->ccb_h.target_id;
1877                 if (gdt->sc_status >= 0x8000)
1878                     gdt_store_event(GDT_ES_SYNC, 0, &gdt->sc_dvr);
1879                 else
1880                     gdt_store_event(GDT_ES_SYNC, service, &gdt->sc_dvr);
1881             } else {
1882                 /* raw service */
1883                 if (gdt->sc_status != GDT_S_RAW_SCSI || gdt->sc_info >= 0x100) {
1884                     ccb->ccb_h.status = CAM_SEL_TIMEOUT;
1885                 } else {
1886                     ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR|CAM_AUTOSNS_VALID;
1887                     ccb->csio.scsi_status = gdt->sc_info;
1888                     bcopy(gccb->gc_scratch, &ccb->csio.sense_data,
1889                           ccb->csio.sense_len);
1890                 }
1891             }
1892         }
1893         --gdt_stat.io_count_act;
1894         xpt_done(ccb);
1895     }
1896     return (0);
1897 }
1898 
1899 /* Controller event handling functions */
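     /*
      * Events are kept in the fixed-size ring 'ebuffer'; elastidx
      * points at the newest entry and eoldidx at the oldest.  A new
      * event identical to the newest one only bumps its repeat count.
      */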
1900 gdt_evt_str *
1901 gdt_store_event(u_int16_t source, u_int16_t idx, gdt_evt_data *evt)
1902 {
1903     gdt_evt_str *e;
1904     struct timeval tv;
1905 
1906     GDT_DPRINTF(GDT_D_MISC, ("gdt_store_event(%d, %d)\n", source, idx));
1907     if (source == 0)                        /* no source -> no event */
1908         return 0;
1909 
1910     if (ebuffer[elastidx].event_source == source &&
1911         ebuffer[elastidx].event_idx == idx &&
1912         ((evt->size != 0 && ebuffer[elastidx].event_data.size != 0 &&
1913           !memcmp((char *)&ebuffer[elastidx].event_data.eu,
1914                   (char *)&evt->eu, evt->size)) ||
1915          (evt->size == 0 && ebuffer[elastidx].event_data.size == 0 &&
1916           !strcmp((char *)&ebuffer[elastidx].event_data.event_string,
1917                   (char *)&evt->event_string)))) {
1918         e = &ebuffer[elastidx];
1919         getmicrotime(&tv);
1920         e->last_stamp = tv.tv_sec;
1921         ++e->same_count;
1922     } else {
1923         if (ebuffer[elastidx].event_source != 0) {  /* entry not free ? */
1924             ++elastidx;
1925             if (elastidx == GDT_MAX_EVENTS)
1926                 elastidx = 0;
1927             if (elastidx == eoldidx) {              /* reached mark ? */
1928                 ++eoldidx;
1929                 if (eoldidx == GDT_MAX_EVENTS)
1930                     eoldidx = 0;
1931             }
1932         }
1933         e = &ebuffer[elastidx];
1934         e->event_source = source;
1935         e->event_idx = idx;
1936         getmicrotime(&tv);
1937         e->first_stamp = e->last_stamp = tv.tv_sec;
1938         e->same_count = 1;
1939         e->event_data = *evt;
1940         e->application = 0;
1941     }
1942     return e;
1943 }
1944 
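     /*
      * Copy out the event stored at 'handle' (-1 starts with the
      * oldest entry).  The return value is the handle of the next
      * event, or -1 once the newest entry has been read.
      */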
1945 int
1946 gdt_read_event(int handle, gdt_evt_str *estr)
1947 {
1948     gdt_evt_str *e;
1949     int eindex;
1950 
1951     GDT_DPRINTF(GDT_D_MISC, ("gdt_read_event(%d)\n", handle));
1952     crit_enter();
1953     if (handle == -1)
1954         eindex = eoldidx;
1955     else
1956         eindex = handle;
1957     estr->event_source = 0;
1958 
1959     if (eindex >= GDT_MAX_EVENTS) {
1960         crit_exit();
1961         return eindex;
1962     }
1963     e = &ebuffer[eindex];
1964     if (e->event_source != 0) {
1965         if (eindex != elastidx) {
1966             if (++eindex == GDT_MAX_EVENTS)
1967                 eindex = 0;
1968         } else {
1969             eindex = -1;
1970         }
1971         memcpy(estr, e, sizeof(gdt_evt_str));
1972     }
1973     crit_exit();
1974     return eindex;
1975 }
1976 
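     /*
      * Deliver the oldest event that has not yet been seen by the
      * given application bit and mark it as delivered; if none is
      * pending, clear event_source in *estr.
      */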
1977 void
1978 gdt_readapp_event(u_int8_t application, gdt_evt_str *estr)
1979 {
1980     gdt_evt_str *e;
1981     int found = FALSE;
1982     int eindex;
1983 
1984     GDT_DPRINTF(GDT_D_MISC, ("gdt_readapp_event(%d)\n", application));
1985     crit_enter();
1986     eindex = eoldidx;
1987     for (;;) {
1988         e = &ebuffer[eindex];
1989         if (e->event_source == 0)
1990             break;
1991         if ((e->application & application) == 0) {
1992             e->application |= application;
1993             found = TRUE;
1994             break;
1995         }
1996         if (eindex == elastidx)
1997             break;
1998         if (++eindex == GDT_MAX_EVENTS)
1999             eindex = 0;
2000     }
2001     if (found)
2002         memcpy(estr, e, sizeof(gdt_evt_str));
2003     else
2004         estr->event_source = 0;
2005     crit_exit();
2006 }
2007 
2008 void
2009 gdt_clear_events(void)
2010 {
2011     GDT_DPRINTF(GDT_D_MISC, ("gdt_clear_events\n"));
2012 
2013     eoldidx = elastidx = 0;
2014     ebuffer[0].event_source = 0;
2015 }
2016