1 /* $OpenBSD: gdt_common.c,v 1.85 2022/04/16 19:19:59 naddy Exp $ */
2
3 /*
4 * Copyright (c) 1999, 2000, 2003 Niklas Hallqvist. All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27 /*
 * This driver would not have been written if it was not for the hardware
 * donations from both ICP-Vortex and Öko.neT. I want to thank them for their support.
30 */
31
32 #include <sys/param.h>
33 #include <sys/buf.h>
34 #include <sys/device.h>
35 #include <sys/ioctl.h>
36 #include <sys/kernel.h>
37 #include <sys/malloc.h>
38 #include <sys/systm.h>
39
40 #include <machine/bus.h>
41
42 #include <scsi/scsi_all.h>
43 #include <scsi/scsi_disk.h>
44 #include <scsi/scsiconf.h>
45
46 #include <dev/biovar.h>
47 #include <dev/ic/gdtreg.h>
48 #include <dev/ic/gdtvar.h>
49
50 #include "bio.h"
51
52 #ifdef GDT_DEBUG
53 int gdt_maxcmds = GDT_MAXCMDS;
54 #undef GDT_MAXCMDS
55 #define GDT_MAXCMDS gdt_maxcmds
56 #endif
57
58 #define GDT_DRIVER_VERSION 1
59 #define GDT_DRIVER_SUBVERSION 2
60
61 int gdt_async_event(struct gdt_softc *, int);
62 void gdt_chain(struct gdt_softc *);
63 void gdt_clear_events(struct gdt_softc *);
64 struct scsi_xfer *gdt_dequeue(struct gdt_softc *);
65 void gdt_enqueue(struct gdt_softc *, struct scsi_xfer *, int);
66 void gdt_enqueue_ccb(struct gdt_softc *, struct gdt_ccb *);
67 void gdt_eval_mapping(u_int32_t, int *, int *, int *);
68 int gdt_exec_ccb(struct gdt_ccb *);
69 void gdt_ccb_free(void *, void *);
70 void *gdt_ccb_alloc(void *);
71 void gdt_internal_cache_cmd(struct scsi_xfer *);
72 int gdt_internal_cmd(struct gdt_softc *, u_int8_t, u_int16_t,
73 u_int32_t, u_int32_t, u_int32_t);
74 #if NBIO > 0
75 int gdt_ioctl(struct device *, u_long, caddr_t);
76 int gdt_ioctl_inq(struct gdt_softc *, struct bioc_inq *);
77 int gdt_ioctl_vol(struct gdt_softc *, struct bioc_vol *);
78 int gdt_ioctl_disk(struct gdt_softc *, struct bioc_disk *);
79 int gdt_ioctl_alarm(struct gdt_softc *, struct bioc_alarm *);
80 int gdt_ioctl_setstate(struct gdt_softc *, struct bioc_setstate *);
81 #endif /* NBIO > 0 */
82 void gdt_scsi_cmd(struct scsi_xfer *);
83 void gdt_start_ccbs(struct gdt_softc *);
84 int gdt_sync_event(struct gdt_softc *, int, u_int8_t,
85 struct scsi_xfer *);
86 void gdt_timeout(void *);
87 int gdt_wait(struct gdt_softc *, struct gdt_ccb *, int);
88 void gdt_watchdog(void *);
89
/* Autoconf glue: device class definition for the "gdt" driver. */
struct cfdriver gdt_cd = {
	NULL, "gdt", DV_DULL
};

/* Midlayer adapter entry points; only command submission is provided. */
const struct scsi_adapter gdt_switch = {
	gdt_scsi_cmd, NULL, NULL, NULL, NULL
};

/* Number of gdt controllers attached so far (bumped in gdt_attach()). */
int gdt_cnt = 0;
/* Nonzero while attach runs: completions are polled, not interrupt driven. */
u_int8_t gdt_polling;
/* Nonzero while gdt_wait() spins, so gdt_intr() knows it may proceed. */
u_int8_t gdt_from_wait;
/* Controller and command index recorded by gdt_intr() for gdt_wait(). */
struct gdt_softc *gdt_wait_gdt;
int gdt_wait_index;
#ifdef GDT_DEBUG
int gdt_debug = GDT_DEBUG;
#endif
106
107 int
gdt_attach(struct gdt_softc * sc)108 gdt_attach(struct gdt_softc *sc)
109 {
110 struct scsibus_attach_args saa;
111 u_int16_t cdev_cnt;
112 int i, id, drv_cyls, drv_hds, drv_secs, error, nsegs;
113
114 gdt_polling = 1;
115 gdt_from_wait = 0;
116
117 if (bus_dmamem_alloc(sc->sc_dmat, GDT_SCRATCH_SZ, PAGE_SIZE, 0,
118 &sc->sc_scratch_seg, 1, &nsegs, BUS_DMA_NOWAIT))
119 panic("%s: bus_dmamem_alloc failed", DEVNAME(sc));
120 if (bus_dmamem_map(sc->sc_dmat, &sc->sc_scratch_seg, 1,
121 GDT_SCRATCH_SZ, &sc->sc_scratch, BUS_DMA_NOWAIT))
122 panic("%s: bus_dmamem_map failed", DEVNAME(sc));
123
124 gdt_clear_events(sc);
125
126 TAILQ_INIT(&sc->sc_free_ccb);
127 TAILQ_INIT(&sc->sc_ccbq);
128 SIMPLEQ_INIT(&sc->sc_queue);
129
130 mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
131 scsi_iopool_init(&sc->sc_iopool, sc, gdt_ccb_alloc, gdt_ccb_free);
132
133 /* Initialize the ccbs */
134 for (i = 0; i < GDT_MAXCMDS; i++) {
135 sc->sc_ccbs[i].gc_cmd_index = i + 2;
136 error = bus_dmamap_create(sc->sc_dmat,
137 (GDT_MAXOFFSETS - 1) << PGSHIFT, GDT_MAXOFFSETS,
138 (GDT_MAXOFFSETS - 1) << PGSHIFT, 0,
139 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
140 &sc->sc_ccbs[i].gc_dmamap_xfer);
141 if (error) {
142 printf("%s: cannot create ccb dmamap (%d)",
143 DEVNAME(sc), error);
144 return (1);
145 }
146 (void)gdt_ccb_set_cmd(sc->sc_ccbs + i, GDT_GCF_UNUSED);
147 TAILQ_INSERT_TAIL(&sc->sc_free_ccb, &sc->sc_ccbs[i],
148 gc_chain);
149 }
150
151 if (!gdt_internal_cmd(sc, GDT_SCREENSERVICE, GDT_INIT, 0, 0, 0)) {
152 printf("screen service initialization error %d\n",
153 sc->sc_status);
154 return (1);
155 }
156
157 if (!gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_INIT, GDT_LINUX_OS, 0,
158 0)) {
159 printf("cache service initialization error %d\n",
160 sc->sc_status);
161 return (1);
162 }
163
164 cdev_cnt = (u_int16_t)sc->sc_info;
165
166 /* Detect number of busses */
167 gdt_enc32(sc->sc_scratch + GDT_IOC_VERSION, GDT_IOC_NEWEST);
168 sc->sc_scratch[GDT_IOC_LIST_ENTRIES] = GDT_MAXBUS;
169 sc->sc_scratch[GDT_IOC_FIRST_CHAN] = 0;
170 sc->sc_scratch[GDT_IOC_LAST_CHAN] = GDT_MAXBUS - 1;
171 gdt_enc32(sc->sc_scratch + GDT_IOC_LIST_OFFSET, GDT_IOC_HDR_SZ);
172 if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
173 GDT_IOCHAN_RAW_DESC, GDT_INVALID_CHANNEL,
174 GDT_IOC_HDR_SZ + GDT_RAWIOC_SZ)) {
175 sc->sc_bus_cnt = sc->sc_scratch[GDT_IOC_CHAN_COUNT];
176 for (i = 0; i < sc->sc_bus_cnt; i++) {
177 id = sc->sc_scratch[GDT_IOC_HDR_SZ +
178 i * GDT_RAWIOC_SZ + GDT_RAWIOC_PROC_ID];
179 sc->sc_bus_id[id] = id < GDT_MAXBUS ? id : 0xff;
180 }
181
182 } else {
183 /* New method failed, use fallback. */
184 gdt_enc32(sc->sc_scratch + GDT_GETCH_CHANNEL_NO, i);
185 for (i = 0; i < GDT_MAXBUS; i++) {
186 if (!gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
187 GDT_SCSI_CHAN_CNT | GDT_L_CTRL_PATTERN,
188 GDT_IO_CHANNEL | GDT_INVALID_CHANNEL,
189 GDT_GETCH_SZ)) {
190 if (i == 0) {
191 printf("cannot get channel count, "
192 "error %d\n", sc->sc_status);
193 return (1);
194 }
195 break;
196 }
197 sc->sc_bus_id[i] =
198 (sc->sc_scratch[GDT_GETCH_SIOP_ID] < GDT_MAXID) ?
199 sc->sc_scratch[GDT_GETCH_SIOP_ID] : 0xff;
200 }
201 sc->sc_bus_cnt = i;
202 }
203
204 /* Read cache configuration */
205 if (!gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL, GDT_CACHE_INFO,
206 GDT_INVALID_CHANNEL, GDT_CINFO_SZ)) {
207 printf("cannot get cache info, error %d\n", sc->sc_status);
208 return (1);
209 }
210 sc->sc_cpar.cp_version =
211 gdt_dec32(sc->sc_scratch + GDT_CPAR_VERSION);
212 sc->sc_cpar.cp_state = gdt_dec16(sc->sc_scratch + GDT_CPAR_STATE);
213 sc->sc_cpar.cp_strategy =
214 gdt_dec16(sc->sc_scratch + GDT_CPAR_STRATEGY);
215 sc->sc_cpar.cp_write_back =
216 gdt_dec16(sc->sc_scratch + GDT_CPAR_WRITE_BACK);
217 sc->sc_cpar.cp_block_size =
218 gdt_dec16(sc->sc_scratch + GDT_CPAR_BLOCK_SIZE);
219
220 /* Read board information and features */
221 sc->sc_more_proc = 0;
222 if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL, GDT_BOARD_INFO,
223 GDT_INVALID_CHANNEL, GDT_BINFO_SZ)) {
224 /* XXX A lot of these assignments can probably go later */
225 sc->sc_binfo.bi_ser_no =
226 gdt_dec32(sc->sc_scratch + GDT_BINFO_SER_NO);
227 bcopy(sc->sc_scratch + GDT_BINFO_OEM_ID,
228 sc->sc_binfo.bi_oem_id, sizeof sc->sc_binfo.bi_oem_id);
229 sc->sc_binfo.bi_ep_flags =
230 gdt_dec16(sc->sc_scratch + GDT_BINFO_EP_FLAGS);
231 sc->sc_binfo.bi_proc_id =
232 gdt_dec32(sc->sc_scratch + GDT_BINFO_PROC_ID);
233 sc->sc_binfo.bi_memsize =
234 gdt_dec32(sc->sc_scratch + GDT_BINFO_MEMSIZE);
235 sc->sc_binfo.bi_mem_banks =
236 sc->sc_scratch[GDT_BINFO_MEM_BANKS];
237 sc->sc_binfo.bi_chan_type =
238 sc->sc_scratch[GDT_BINFO_CHAN_TYPE];
239 sc->sc_binfo.bi_chan_count =
240 sc->sc_scratch[GDT_BINFO_CHAN_COUNT];
241 sc->sc_binfo.bi_rdongle_pres =
242 sc->sc_scratch[GDT_BINFO_RDONGLE_PRES];
243 sc->sc_binfo.bi_epr_fw_ver =
244 gdt_dec32(sc->sc_scratch + GDT_BINFO_EPR_FW_VER);
245 sc->sc_binfo.bi_upd_fw_ver =
246 gdt_dec32(sc->sc_scratch + GDT_BINFO_UPD_FW_VER);
247 sc->sc_binfo.bi_upd_revision =
248 gdt_dec32(sc->sc_scratch + GDT_BINFO_UPD_REVISION);
249 bcopy(sc->sc_scratch + GDT_BINFO_TYPE_STRING,
250 sc->sc_binfo.bi_type_string,
251 sizeof sc->sc_binfo.bi_type_string);
252 bcopy(sc->sc_scratch + GDT_BINFO_RAID_STRING,
253 sc->sc_binfo.bi_raid_string,
254 sizeof sc->sc_binfo.bi_raid_string);
255 sc->sc_binfo.bi_update_pres =
256 sc->sc_scratch[GDT_BINFO_UPDATE_PRES];
257 sc->sc_binfo.bi_xor_pres =
258 sc->sc_scratch[GDT_BINFO_XOR_PRES];
259 sc->sc_binfo.bi_prom_type =
260 sc->sc_scratch[GDT_BINFO_PROM_TYPE];
261 sc->sc_binfo.bi_prom_count =
262 sc->sc_scratch[GDT_BINFO_PROM_COUNT];
263 sc->sc_binfo.bi_dup_pres =
264 gdt_dec32(sc->sc_scratch + GDT_BINFO_DUP_PRES);
265 sc->sc_binfo.bi_chan_pres =
266 gdt_dec32(sc->sc_scratch + GDT_BINFO_CHAN_PRES);
267 sc->sc_binfo.bi_mem_pres =
268 gdt_dec32(sc->sc_scratch + GDT_BINFO_MEM_PRES);
269 sc->sc_binfo.bi_ft_bus_system =
270 sc->sc_scratch[GDT_BINFO_FT_BUS_SYSTEM];
271 sc->sc_binfo.bi_subtype_valid =
272 sc->sc_scratch[GDT_BINFO_SUBTYPE_VALID];
273 sc->sc_binfo.bi_board_subtype =
274 sc->sc_scratch[GDT_BINFO_BOARD_SUBTYPE];
275 sc->sc_binfo.bi_rampar_pres =
276 sc->sc_scratch[GDT_BINFO_RAMPAR_PRES];
277
278 if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
279 GDT_BOARD_FEATURES, GDT_INVALID_CHANNEL, GDT_BFEAT_SZ)) {
280 sc->sc_bfeat.bf_chaining =
281 sc->sc_scratch[GDT_BFEAT_CHAINING];
282 sc->sc_bfeat.bf_striping =
283 sc->sc_scratch[GDT_BFEAT_STRIPING];
284 sc->sc_bfeat.bf_mirroring =
285 sc->sc_scratch[GDT_BFEAT_MIRRORING];
286 sc->sc_bfeat.bf_raid =
287 sc->sc_scratch[GDT_BFEAT_RAID];
288 sc->sc_more_proc = 1;
289 }
290 } else {
291 /* XXX Not implemented yet */
292 }
293
294 /* Read more information */
295 if (sc->sc_more_proc) {
296 int bus, j;
297 /* physical drives, channel addresses */
298 /* step 1: get magical bus number from firmware */
299 gdt_enc32(sc->sc_scratch + GDT_IOC_VERSION, GDT_IOC_NEWEST);
300 sc->sc_scratch[GDT_IOC_LIST_ENTRIES] = GDT_MAXBUS;
301 sc->sc_scratch[GDT_IOC_FIRST_CHAN] = 0;
302 sc->sc_scratch[GDT_IOC_LAST_CHAN] = GDT_MAXBUS - 1;
303 gdt_enc32(sc->sc_scratch + GDT_IOC_LIST_OFFSET, GDT_IOC_HDR_SZ);
304 if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
305 GDT_IOCHAN_DESC, GDT_INVALID_CHANNEL,
306 GDT_IOC_HDR_SZ + GDT_IOC_SZ * GDT_MAXBUS)) {
307 GDT_DPRINTF(GDT_D_INFO, ("method 1\n"));
308 for (bus = 0; bus < sc->sc_bus_cnt; bus++) {
309 sc->sc_raw[bus].ra_address =
310 gdt_dec32(sc->sc_scratch +
311 GDT_IOC_HDR_SZ +
312 GDT_IOC_SZ * bus +
313 GDT_IOC_ADDRESS);
314 sc->sc_raw[bus].ra_local_no =
315 gdt_dec8(sc->sc_scratch +
316 GDT_IOC_HDR_SZ +
317 GDT_IOC_SZ * bus +
318 GDT_IOC_LOCAL_NO);
319 GDT_DPRINTF(GDT_D_INFO, (
320 "bus: %d address: %x local: %x\n",
321 bus,
322 sc->sc_raw[bus].ra_address,
323 sc->sc_raw[bus].ra_local_no));
324 }
325 } else {
326 GDT_DPRINTF(GDT_D_INFO, ("method 2\n"));
327 for (bus = 0; bus < sc->sc_bus_cnt; bus++) {
328 sc->sc_raw[bus].ra_address = GDT_IO_CHANNEL;
329 sc->sc_raw[bus].ra_local_no = bus;
330 GDT_DPRINTF(GDT_D_INFO, (
331 "bus: %d address: %x local: %x\n",
332 bus,
333 sc->sc_raw[bus].ra_address,
334 sc->sc_raw[bus].ra_local_no));
335 }
336 }
337 /* step 2: use magical bus number to get nr of phys disks */
338 for (bus = 0; bus < sc->sc_bus_cnt; bus++) {
339 gdt_enc32(sc->sc_scratch + GDT_GETCH_CHANNEL_NO,
340 sc->sc_raw[bus].ra_local_no);
341 if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL,
342 GDT_SCSI_CHAN_CNT | GDT_L_CTRL_PATTERN,
343 sc->sc_raw[bus].ra_address | GDT_INVALID_CHANNEL,
344 GDT_GETCH_SZ)) {
345 sc->sc_raw[bus].ra_phys_cnt =
346 gdt_dec32(sc->sc_scratch +
347 GDT_GETCH_DRIVE_CNT);
348 GDT_DPRINTF(GDT_D_INFO, ("chan: %d disks: %d\n",
349 bus, sc->sc_raw[bus].ra_phys_cnt));
350 }
351
352 /* step 3: get scsi disk nr */
353 if (sc->sc_raw[bus].ra_phys_cnt > 0) {
354 gdt_enc32(sc->sc_scratch +
355 GDT_GETSCSI_CHAN,
356 sc->sc_raw[bus].ra_local_no);
357 gdt_enc32(sc->sc_scratch +
358 GDT_GETSCSI_CNT,
359 sc->sc_raw[bus].ra_phys_cnt);
360 if (gdt_internal_cmd(sc, GDT_CACHESERVICE,
361 GDT_IOCTL,
362 GDT_SCSI_DR_LIST | GDT_L_CTRL_PATTERN,
363 sc->sc_raw[bus].ra_address |
364 GDT_INVALID_CHANNEL,
365 GDT_GETSCSI_SZ))
366 for (j = 0;
367 j < sc->sc_raw[bus].ra_phys_cnt;
368 j++) {
369 sc->sc_raw[bus].ra_id_list[j] =
370 gdt_dec32(sc->sc_scratch +
371 GDT_GETSCSI_LIST +
372 GDT_GETSCSI_LIST_SZ * j);
373 GDT_DPRINTF(GDT_D_INFO,
374 (" diskid: %d\n",
375 sc->sc_raw[bus].ra_id_list[j]));
376 }
377 else
378 sc->sc_raw[bus].ra_phys_cnt = 0;
379 }
380 /* add found disks to grand total */
381 sc->sc_total_disks += sc->sc_raw[bus].ra_phys_cnt;
382 }
383 } /* if (sc->sc_more_proc) */
384
385 if (!gdt_internal_cmd(sc, GDT_SCSIRAWSERVICE, GDT_INIT, 0, 0, 0)) {
386 printf("raw service initialization error %d\n",
387 sc->sc_status);
388 return (1);
389 }
390
391 /* Set/get features raw service (scatter/gather) */
392 sc->sc_raw_feat = 0;
393 if (gdt_internal_cmd(sc, GDT_SCSIRAWSERVICE, GDT_SET_FEAT,
394 GDT_SCATTER_GATHER, 0, 0))
395 if (gdt_internal_cmd(sc, GDT_SCSIRAWSERVICE, GDT_GET_FEAT, 0,
396 0, 0))
397 sc->sc_raw_feat = sc->sc_info;
398
399 /* Set/get features cache service (scatter/gather) */
400 sc->sc_cache_feat = 0;
401 if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_SET_FEAT, 0,
402 GDT_SCATTER_GATHER, 0))
403 if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_GET_FEAT, 0, 0,
404 0))
405 sc->sc_cache_feat = sc->sc_info;
406
407 /* XXX Linux reserve drives here, potentially */
408
409 sc->sc_ndevs = 0;
410 /* Scan for cache devices */
411 for (i = 0; i < cdev_cnt && i < GDT_MAX_HDRIVES; i++)
412 if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_INFO, i, 0,
413 0)) {
414 sc->sc_hdr[i].hd_present = 1;
415 sc->sc_hdr[i].hd_size = sc->sc_info;
416
417 if (sc->sc_hdr[i].hd_size > 0)
418 sc->sc_ndevs++;
419
420 /*
421 * Evaluate mapping (sectors per head, heads per cyl)
422 */
423 sc->sc_hdr[i].hd_size &= ~GDT_SECS32;
424 if (sc->sc_info2 == 0)
425 gdt_eval_mapping(sc->sc_hdr[i].hd_size,
426 &drv_cyls, &drv_hds, &drv_secs);
427 else {
428 drv_hds = sc->sc_info2 & 0xff;
429 drv_secs = (sc->sc_info2 >> 8) & 0xff;
430 drv_cyls = sc->sc_hdr[i].hd_size / drv_hds /
431 drv_secs;
432 }
433 sc->sc_hdr[i].hd_heads = drv_hds;
434 sc->sc_hdr[i].hd_secs = drv_secs;
435 /* Round the size */
436 sc->sc_hdr[i].hd_size = drv_cyls * drv_hds * drv_secs;
437
438 if (gdt_internal_cmd(sc, GDT_CACHESERVICE,
439 GDT_DEVTYPE, i, 0, 0))
440 sc->sc_hdr[i].hd_devtype = sc->sc_info;
441 }
442
443 printf("dpmem %llx %d-bus %d cache device%s\n",
444 (long long)sc->sc_dpmembase,
445 sc->sc_bus_cnt, cdev_cnt, cdev_cnt == 1 ? "" : "s");
446 printf("%s: ver %x, cache %s, strategy %d, writeback %s, blksz %d\n",
447 DEVNAME(sc), sc->sc_cpar.cp_version,
448 sc->sc_cpar.cp_state ? "on" : "off", sc->sc_cpar.cp_strategy,
449 sc->sc_cpar.cp_write_back ? "on" : "off",
450 sc->sc_cpar.cp_block_size);
451 #if 1
452 printf("%s: raw feat %x cache feat %x\n", DEVNAME(sc),
453 sc->sc_raw_feat, sc->sc_cache_feat);
454 #endif
455
456 #if NBIO > 0
457 if (bio_register(&sc->sc_dev, gdt_ioctl) != 0)
458 panic("%s: controller registration failed", DEVNAME(sc));
459 #endif
460 gdt_cnt++;
461
462 saa.saa_adapter_softc = sc;
463 saa.saa_adapter = &gdt_switch;
464 saa.saa_adapter_buswidth =
465 (sc->sc_class & GDT_FC) ? GDT_MAXID : GDT_MAX_HDRIVES;
466 saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
467 saa.saa_luns = 8;
468 if (sc->sc_ndevs == 0)
469 saa.saa_openings = 0;
470 else
471 saa.saa_openings = (GDT_MAXCMDS - GDT_CMD_RESERVE) /
472 sc->sc_ndevs;
473 saa.saa_pool = &sc->sc_iopool;
474 saa.saa_quirks = saa.saa_flags = 0;
475 saa.saa_wwpn = saa.saa_wwnn = 0;
476
477 config_found(&sc->sc_dev, &saa, scsiprint);
478
479 gdt_polling = 0;
480 return (0);
481 }
482
483 void
gdt_eval_mapping(u_int32_t size,int * cyls,int * heads,int * secs)484 gdt_eval_mapping(u_int32_t size, int *cyls, int *heads, int *secs)
485 {
486 *cyls = size / GDT_HEADS / GDT_SECS;
487 if (*cyls < GDT_MAXCYLS) {
488 *heads = GDT_HEADS;
489 *secs = GDT_SECS;
490 } else {
491 /* Too high for 64 * 32 */
492 *cyls = size / GDT_MEDHEADS / GDT_MEDSECS;
493 if (*cyls < GDT_MAXCYLS) {
494 *heads = GDT_MEDHEADS;
495 *secs = GDT_MEDSECS;
496 } else {
497 /* Too high for 127 * 63 */
498 *cyls = size / GDT_BIGHEADS / GDT_BIGSECS;
499 *heads = GDT_BIGHEADS;
500 *secs = GDT_BIGSECS;
501 }
502 }
503 }
504
505 /*
506 * Insert a command into the driver queue, either at the front or at the tail.
507 * It's ok to overload the freelist link as these structures are never on
508 * the freelist at this time.
509 */
510 void
gdt_enqueue(struct gdt_softc * sc,struct scsi_xfer * xs,int infront)511 gdt_enqueue(struct gdt_softc *sc, struct scsi_xfer *xs, int infront)
512 {
513 if (infront)
514 SIMPLEQ_INSERT_HEAD(&sc->sc_queue, xs, xfer_list);
515 else
516 SIMPLEQ_INSERT_TAIL(&sc->sc_queue, xs, xfer_list);
517 }
518
519 /*
520 * Pull a command off the front of the driver queue.
521 */
522 struct scsi_xfer *
gdt_dequeue(struct gdt_softc * sc)523 gdt_dequeue(struct gdt_softc *sc)
524 {
525 struct scsi_xfer *xs;
526
527 xs = SIMPLEQ_FIRST(&sc->sc_queue);
528 if (xs != NULL)
529 SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, xfer_list);
530
531 return (xs);
532 }
533
/*
 * Start a SCSI operation on a cache device.  Emulated commands are
 * answered locally; reads and writes are mapped for DMA and handed to
 * the firmware's cache service.  Drains the whole driver queue unless
 * the controller is busy or this xfer is polled.
 * XXX Polled operation is not yet complete. What kind of locking do we need?
 */
void
gdt_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct gdt_softc *sc = link->bus->sb_adapter_softc;
	u_int8_t target = link->target;
	struct gdt_ccb *ccb;
	u_int32_t blockno, blockcnt;
	struct scsi_rw *rw;
	struct scsi_rw_10 *rw10;
	bus_dmamap_t xfer;
	int error;
	int s;
	int polled;

	GDT_DPRINTF(GDT_D_CMD, ("gdt_scsi_cmd "));

	s = splbio();

	xs->error = XS_NOERROR;

	/* Only LUN 0 of a present cache drive is addressable. */
	if (target >= GDT_MAX_HDRIVES || !sc->sc_hdr[target].hd_present ||
	    link->lun != 0) {
		/*
		 * XXX Should be XS_SENSE but that would require setting up a
		 * faked sense too.
		 */
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		splx(s);
		return;
	}

	/* Don't double enqueue if we came from gdt_chain. */
	if (xs != SIMPLEQ_FIRST(&sc->sc_queue))
		gdt_enqueue(sc, xs, 0);

	while ((xs = gdt_dequeue(sc)) != NULL) {
		xs->error = XS_NOERROR;
		ccb = NULL;
		/* xs changes each iteration; re-derive per-xfer state. */
		link = xs->sc_link;
		target = link->target;
		polled = ISSET(xs->flags, SCSI_POLL);

		if (!gdt_polling && !(xs->flags & SCSI_POLL) &&
		    sc->sc_test_busy(sc)) {
			/*
			 * Put it back in front. XXX Should we instead
			 * set xs->error to XS_BUSY?
			 */
			gdt_enqueue(sc, xs, 1);
			break;
		}

		switch (xs->cmd.opcode) {
		case TEST_UNIT_READY:
		case REQUEST_SENSE:
		case INQUIRY:
		case MODE_SENSE:
		case START_STOP:
		case READ_CAPACITY:
#if 0
		case VERIFY:
#endif
			/* Answered locally, no firmware round trip. */
			gdt_internal_cache_cmd(xs);
			scsi_done(xs);
			goto ready;

		case PREVENT_ALLOW:
			GDT_DPRINTF(GDT_D_CMD, ("PREVENT/ALLOW "));
			/* XXX Not yet implemented */
			xs->error = XS_NOERROR;
			scsi_done(xs);
			goto ready;

		default:
			GDT_DPRINTF(GDT_D_CMD,
			    ("unknown opc %d ", xs->cmd.opcode));
			/* XXX Not yet implemented */
			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
			goto ready;

		case READ_COMMAND:
		case READ_10:
		case WRITE_COMMAND:
		case WRITE_10:
		case SYNCHRONIZE_CACHE:
			/*
			 * A new command chain, start from the beginning.
			 */
			sc->sc_cmd_off = 0;

			if (xs->cmd.opcode == SYNCHRONIZE_CACHE) {
				blockno = blockcnt = 0;
			} else {
				/* A read or write operation. */
				if (xs->cmdlen == 6) {
					rw = (struct scsi_rw *)&xs->cmd;
					blockno = _3btol(rw->addr) &
					    (SRW_TOPADDR << 16 | 0xffff);
					/* 6-byte R/W: length 0 means 256. */
					blockcnt =
					    rw->length ? rw->length : 0x100;
				} else {
					rw10 = (struct scsi_rw_10 *)&xs->cmd;
					blockno = _4btol(rw10->addr);
					blockcnt = _2btol(rw10->length);
				}
				/* Reject access beyond the drive size. */
				if (blockno >= sc->sc_hdr[target].hd_size ||
				    blockno + blockcnt >
				    sc->sc_hdr[target].hd_size) {
					printf(
					    "%s: out of bounds %u-%u >= %u\n",
					    DEVNAME(sc), blockno,
					    blockcnt,
					    sc->sc_hdr[target].hd_size);
					/*
					 * XXX Should be XS_SENSE but that
					 * would require setting up a faked
					 * sense too.
					 */
					xs->error = XS_DRIVER_STUFFUP;
					scsi_done(xs);
					goto ready;
				}
			}

			ccb = xs->io;
			ccb->gc_blockno = blockno;
			ccb->gc_blockcnt = blockcnt;
			ccb->gc_xs = xs;
			ccb->gc_timeout = xs->timeout;
			ccb->gc_service = GDT_CACHESERVICE;
			ccb->gc_flags = 0;
			gdt_ccb_set_cmd(ccb, GDT_GCF_SCSI);

			if (xs->cmd.opcode != SYNCHRONIZE_CACHE) {
				/* Map the data buffer for DMA. */
				xfer = ccb->gc_dmamap_xfer;
				error = bus_dmamap_load(sc->sc_dmat, xfer,
				    xs->data, xs->datalen, NULL,
				    (xs->flags & SCSI_NOSLEEP) ?
				    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
				if (error) {
					printf("%s: gdt_scsi_cmd: ",
					    DEVNAME(sc));
					if (error == EFBIG)
						printf(
						    "more than %d dma segs\n",
						    GDT_MAXOFFSETS);
					else
						printf("error %d "
						    "loading dma map\n",
						    error);

					xs->error = XS_DRIVER_STUFFUP;
					scsi_done(xs);
					goto ready;
				}
				bus_dmamap_sync(sc->sc_dmat, xfer, 0,
				    xfer->dm_mapsize,
				    (xs->flags & SCSI_DATA_IN) ?
				    BUS_DMASYNC_PREREAD :
				    BUS_DMASYNC_PREWRITE);
			}

			gdt_enqueue_ccb(sc, ccb);
			/* XXX what if enqueue did not start a transfer? */
			if (gdt_polling || (xs->flags & SCSI_POLL)) {
				/* Spin until this ccb completes. */
				if (!gdt_wait(sc, ccb, ccb->gc_timeout)) {
					printf("%s: command %d timed out\n",
					    DEVNAME(sc),
					    ccb->gc_cmd_index);
					xs->error = XS_TIMEOUT;
					scsi_done(xs);
					splx(s);
					return;
				}
			}
		}

	ready:
		/*
		 * Don't process the queue if we are polling.
		 */
		if (polled) {
			break;
		}
	}

	splx(s);
}
729
/*
 * Assemble the firmware command for a cache-service ccb in the sc_cmd
 * staging buffer and hand it to the controller.  Returns 1 on success,
 * 0 when the command would not fit into DPMEM (controller busy).
 * XXX Currently only for cacheservice, returns 0 if busy
 */
int
gdt_exec_ccb(struct gdt_ccb *ccb)
{
	struct scsi_xfer *xs = ccb->gc_xs;
	struct scsi_link *link = xs->sc_link;
	struct gdt_softc *sc = link->bus->sb_adapter_softc;
	u_int8_t target = link->target;
	u_int32_t sg_canz;	/* number of scatter/gather entries used */
	bus_dmamap_t xfer;
	int i;
#if 1 /* XXX */
	/* Re-entrance guard.  NOTE(review): __level is a reserved name. */
	static int __level = 0;

	if (__level++ > 0)
		panic("level > 0");
#endif
	GDT_DPRINTF(GDT_D_CMD, ("gdt_exec_ccb(%p, %p) ", xs, ccb));

	sc->sc_cmd_cnt = 0;

	/*
	 * XXX Yeah I know it's an always-true condition, but that may change
	 * later.
	 */
	if (sc->sc_cmd_cnt == 0)
		sc->sc_set_sema0(sc);

	/* Common command header: index, board node, target device. */
	gdt_enc32(sc->sc_cmd + GDT_CMD_COMMANDINDEX, ccb->gc_cmd_index);
	gdt_enc32(sc->sc_cmd + GDT_CMD_BOARDNODE, GDT_LOCALBOARD);
	gdt_enc16(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO,
	    target);

	switch (xs->cmd.opcode) {
	case PREVENT_ALLOW:
	case SYNCHRONIZE_CACHE:
		if (xs->cmd.opcode == PREVENT_ALLOW) {
			/* XXX PREVENT_ALLOW support goes here */
		} else {
			GDT_DPRINTF(GDT_D_CMD,
			    ("SYNCHRONIZE CACHE tgt %d ", target));
			sc->sc_cmd[GDT_CMD_OPCODE] = GDT_FLUSH;
		}
		gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
		    1);
		sg_canz = 0;
		break;

	case WRITE_COMMAND:
	case WRITE_10:
		/* XXX WRITE_THR could be supported too */
		sc->sc_cmd[GDT_CMD_OPCODE] = GDT_WRITE;
		break;

	case READ_COMMAND:
	case READ_10:
		sc->sc_cmd[GDT_CMD_OPCODE] = GDT_READ;
		break;
	}

	if (xs->cmd.opcode != PREVENT_ALLOW &&
	    xs->cmd.opcode != SYNCHRONIZE_CACHE) {
		/* Block range set up by gdt_scsi_cmd(). */
		gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO,
		    ccb->gc_blockno);
		gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT,
		    ccb->gc_blockcnt);

		xfer = ccb->gc_dmamap_xfer;
		if (sc->sc_cache_feat & GDT_SCATTER_GATHER) {
			/*
			 * NOTE(review): DESTADDR 0xffffffff appears to tell
			 * the firmware to use the S/G list filled in below —
			 * confirm against the firmware interface spec.
			 */
			gdt_enc32(
			    sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
			    0xffffffff);
			for (i = 0; i < xfer->dm_nsegs; i++) {
				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
				    GDT_CACHE_SG_LST + i * GDT_SG_SZ +
				    GDT_SG_PTR,
				    xfer->dm_segs[i].ds_addr);
				gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
				    GDT_CACHE_SG_LST + i * GDT_SG_SZ +
				    GDT_SG_LEN,
				    xfer->dm_segs[i].ds_len);
				GDT_DPRINTF(GDT_D_IO,
				    ("#%d pa %lx len %lx\n", i,
				    xfer->dm_segs[i].ds_addr,
				    xfer->dm_segs[i].ds_len));
			}
			sg_canz = xfer->dm_nsegs;
			/* Terminate the S/G list with a zero length. */
			gdt_enc32(
			    sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST +
			    sg_canz * GDT_SG_SZ + GDT_SG_LEN, 0);
		} else {
			/* XXX Hardly correct */
			gdt_enc32(
			    sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR,
			    xfer->dm_segs[0].ds_addr);
			sg_canz = 0;
		}
	}
	gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ, sg_canz);

	/* Total command length, rounded to a 32-bit boundary. */
	sc->sc_cmd_len =
	    roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST + sg_canz * GDT_SG_SZ,
	    sizeof (u_int32_t));

	/* Would this command overflow the DPMEM command area? */
	if (sc->sc_cmd_cnt > 0 &&
	    sc->sc_cmd_off + sc->sc_cmd_len + GDT_DPMEM_COMMAND_OFFSET >
	    sc->sc_ic_all_size) {
		printf("%s: DPMEM overflow\n", DEVNAME(sc));
		xs->error = XS_BUSY;
#if 1 /* XXX */
		__level--;
#endif
		return (0);
	}

	/* Copy the staged command into DPMEM and kick the firmware. */
	sc->sc_copy_cmd(sc, ccb);
	sc->sc_release_event(sc, ccb);

	xs->error = XS_NOERROR;
	xs->resid = 0;
#if 1 /* XXX */
	__level--;
#endif
	return (1);
}
855
856 /* Emulated SCSI operation on cache device */
857 void
gdt_internal_cache_cmd(struct scsi_xfer * xs)858 gdt_internal_cache_cmd(struct scsi_xfer *xs)
859 {
860 struct scsi_link *link = xs->sc_link;
861 struct gdt_softc *sc = link->bus->sb_adapter_softc;
862 struct scsi_inquiry_data inq;
863 struct scsi_sense_data sd;
864 struct scsi_read_cap_data rcd;
865 u_int8_t target = link->target;
866
867 GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cache_cmd "));
868
869 switch (xs->cmd.opcode) {
870 case TEST_UNIT_READY:
871 case START_STOP:
872 #if 0
873 case VERIFY:
874 #endif
875 GDT_DPRINTF(GDT_D_CMD, ("opc %d tgt %d ", xs->cmd.opcode,
876 target));
877 break;
878
879 case REQUEST_SENSE:
880 GDT_DPRINTF(GDT_D_CMD, ("REQUEST SENSE tgt %d ", target));
881 bzero(&sd, sizeof sd);
882 sd.error_code = SSD_ERRCODE_CURRENT;
883 sd.segment = 0;
884 sd.flags = SKEY_NO_SENSE;
885 gdt_enc32(sd.info, 0);
886 sd.extra_len = 0;
887 scsi_copy_internal_data(xs, &sd, sizeof(sd));
888 break;
889
890 case INQUIRY:
891 GDT_DPRINTF(GDT_D_CMD, ("INQUIRY tgt %d devtype %x ", target,
892 sc->sc_hdr[target].hd_devtype));
893 bzero(&inq, sizeof inq);
894 inq.device =
895 (sc->sc_hdr[target].hd_devtype & 4) ? T_CDROM : T_DIRECT;
896 inq.dev_qual2 =
897 (sc->sc_hdr[target].hd_devtype & 1) ? SID_REMOVABLE : 0;
898 inq.version = SCSI_REV_2;
899 inq.response_format = SID_SCSI2_RESPONSE;
900 inq.additional_length = SID_SCSI2_ALEN;
901 inq.flags |= SID_CmdQue;
902 strlcpy(inq.vendor, "ICP ", sizeof inq.vendor);
903 snprintf(inq.product, sizeof inq.product, "Host drive #%02d",
904 target);
905 strlcpy(inq.revision, " ", sizeof inq.revision);
906 scsi_copy_internal_data(xs, &inq, sizeof(inq));
907 break;
908
909 case READ_CAPACITY:
910 GDT_DPRINTF(GDT_D_CMD, ("READ CAPACITY tgt %d ", target));
911 bzero(&rcd, sizeof rcd);
912 _lto4b(sc->sc_hdr[target].hd_size - 1, rcd.addr);
913 _lto4b(GDT_SECTOR_SIZE, rcd.length);
914 scsi_copy_internal_data(xs, &rcd, sizeof(rcd));
915 break;
916
917 default:
918 GDT_DPRINTF(GDT_D_CMD, ("unsupported scsi command %#x tgt %d ",
919 xs->cmd.opcode, target));
920 xs->error = XS_DRIVER_STUFFUP;
921 return;
922 }
923
924 xs->error = XS_NOERROR;
925 }
926
/*
 * Clear any pending controller events at attach time.
 * XXX Stub: nothing is actually cleared yet.
 */
void
gdt_clear_events(struct gdt_softc *sc)
{
	GDT_DPRINTF(GDT_D_MISC, ("gdt_clear_events(%p) ", sc));

	/* XXX To be implemented */
}
934
/*
 * Handle an asynchronous firmware event for the given service.
 * XXX Stub: both the screen service and the general case are unhandled;
 * always returns 0.
 */
int
gdt_async_event(struct gdt_softc *sc, int service)
{
	GDT_DPRINTF(GDT_D_INTR, ("gdt_async_event(%p, %d) ", sc, service));

	if (service == GDT_SCREENSERVICE) {
		/* XXX To be implemented */
	} else {
		/* XXX To be implemented */
	}

	return (0);
}
948
949 int
gdt_sync_event(struct gdt_softc * sc,int service,u_int8_t index,struct scsi_xfer * xs)950 gdt_sync_event(struct gdt_softc *sc, int service, u_int8_t index,
951 struct scsi_xfer *xs)
952 {
953 GDT_DPRINTF(GDT_D_INTR,
954 ("gdt_sync_event(%p, %d, %d, %p) ", sc, service, index, xs));
955
956 if (service == GDT_SCREENSERVICE) {
957 GDT_DPRINTF(GDT_D_INTR, ("service == GDT_SCREENSERVICE "));
958 /* XXX To be implemented */
959 return (0);
960 } else {
961 switch (sc->sc_status) {
962 case GDT_S_OK:
963 GDT_DPRINTF(GDT_D_INTR, ("sc_status == GDT_S_OK "));
964 /* XXX To be implemented */
965 break;
966 case GDT_S_BSY:
967 GDT_DPRINTF(GDT_D_INTR, ("sc_status == GDT_S_BSY "));
968 /* XXX To be implemented */
969 return (2);
970 default:
971 GDT_DPRINTF(GDT_D_INTR, ("sc_status is %d ",
972 sc->sc_status));
973 /* XXX To be implemented */
974 return (0);
975 }
976 }
977
978 return (1);
979 }
980
/*
 * Interrupt handler (also invoked synchronously from gdt_wait() while
 * polling).  Reads the controller status, dispatches asynchronous and
 * "special" events, completes the ccb named by the returned index and
 * restarts the driver queue.  Returns 1 if the interrupt was ours.
 */
int
gdt_intr(void *arg)
{
	struct gdt_softc *sc = arg;
	struct gdt_intr_ctx ctx;
	int chain = 1;
	int sync_val = 0;
	struct scsi_xfer *xs = NULL;
	int prev_cmd;
	struct gdt_ccb *ccb;

	GDT_DPRINTF(GDT_D_INTR, ("gdt_intr(%p) ", sc));

	/* If polling and we were not called from gdt_wait, just return */
	if (gdt_polling && !gdt_from_wait)
		return (0);

	ctx.istatus = sc->sc_get_status(sc);
	if (!ctx.istatus) {
		/* No status available: not our interrupt. */
		sc->sc_status = GDT_S_NO_STATUS;
		return (0);
	}

	gdt_wait_index = 0;
	ctx.service = ctx.info2 = 0;

	/* Bus-specific acknowledge; fills in cmd_status/info/info2. */
	sc->sc_intr(sc, &ctx);

	sc->sc_status = ctx.cmd_status;
	sc->sc_info = ctx.info;
	sc->sc_info2 = ctx.info2;

	/* Tell a spinning gdt_wait() which command just completed. */
	if (gdt_from_wait) {
		gdt_wait_gdt = sc;
		gdt_wait_index = ctx.istatus;
	}

	switch (ctx.istatus) {
	case GDT_ASYNCINDEX:
		gdt_async_event(sc, ctx.service);
		goto finish;

	case GDT_SPEZINDEX:
		printf("%s: uninitialized or unknown service (%d %d)\n",
		    DEVNAME(sc), ctx.info, ctx.info2);
		chain = 0;
		goto finish;
	}

	/* Command indices start at 2 (see gdt_attach()). */
	ccb = &sc->sc_ccbs[ctx.istatus - 2];
	xs = ccb->gc_xs;
	if (!gdt_polling)
		timeout_del(&xs->stimeout);
	ctx.service = ccb->gc_service;
	prev_cmd = ccb->gc_flags & GDT_GCF_CMD_MASK;
	if (xs && xs->cmd.opcode != PREVENT_ALLOW &&
	    xs->cmd.opcode != SYNCHRONIZE_CACHE) {
		/* Tear down the DMA mapping set up in gdt_scsi_cmd(). */
		bus_dmamap_sync(sc->sc_dmat, ccb->gc_dmamap_xfer, 0,
		    ccb->gc_dmamap_xfer->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ccb->gc_dmamap_xfer);
	}
	switch (prev_cmd) {
	case GDT_GCF_UNUSED:
		/* XXX Not yet implemented */
		chain = 0;
		goto finish;
	case GDT_GCF_INTERNAL:
		/* Internal commands carry no xfer to complete. */
		chain = 0;
		goto finish;
	}

	sync_val = gdt_sync_event(sc, ctx.service, ctx.istatus, xs);

 finish:
	switch (sync_val) {
	case 0:
		/* Complete the xfer here only when a poller is waiting. */
		if (xs && gdt_from_wait)
			scsi_done(xs);
		break;
	case 1:
		scsi_done(xs);
		break;

	case 2:
		/* Controller was busy: run the command again later. */
		gdt_enqueue(sc, xs, 0);
	}

	if (chain)
		gdt_chain(sc);

	return (1);
}
1075
1076 int
gdt_wait(struct gdt_softc * sc,struct gdt_ccb * ccb,int timeout)1077 gdt_wait(struct gdt_softc *sc, struct gdt_ccb *ccb, int timeout)
1078 {
1079 int s, rslt, rv = 0;
1080
1081 GDT_DPRINTF(GDT_D_MISC,
1082 ("gdt_wait(%p, %p, %d) ", sc, ccb, timeout));
1083
1084 gdt_from_wait = 1;
1085 do {
1086 s = splbio();
1087 rslt = gdt_intr(sc);
1088 splx(s);
1089 if (rslt && sc == gdt_wait_gdt &&
1090 ccb->gc_cmd_index == gdt_wait_index) {
1091 rv = 1;
1092 break;
1093 }
1094 DELAY(1000); /* 1 millisecond */
1095 } while (--timeout);
1096 gdt_from_wait = 0;
1097
1098 while (sc->sc_test_busy(sc))
1099 DELAY(0); /* XXX correct? */
1100
1101 return (rv);
1102 }
1103
1104 int
gdt_internal_cmd(struct gdt_softc * sc,u_int8_t service,u_int16_t opcode,u_int32_t arg1,u_int32_t arg2,u_int32_t arg3)1105 gdt_internal_cmd(struct gdt_softc *sc, u_int8_t service, u_int16_t opcode,
1106 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3)
1107 {
1108 int retries, rslt;
1109 struct gdt_ccb *ccb;
1110
1111 GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cmd(%p, %d, %d, %d, %d, %d) ",
1112 sc, service, opcode, arg1, arg2, arg3));
1113
1114 bzero(sc->sc_cmd, GDT_CMD_SZ);
1115
1116 for (retries = GDT_RETRIES; ; ) {
1117 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
1118 if (ccb == NULL) {
1119 printf("%s: no free command index found\n",
1120 DEVNAME(sc));
1121 return (0);
1122 }
1123 ccb->gc_service = service;
1124 ccb->gc_xs = NULL;
1125 ccb->gc_blockno = ccb->gc_blockcnt = 0;
1126 ccb->gc_timeout = ccb->gc_flags = 0;
1127 ccb->gc_service = GDT_CACHESERVICE;
1128 gdt_ccb_set_cmd(ccb, GDT_GCF_INTERNAL);
1129
1130 sc->sc_set_sema0(sc);
1131 gdt_enc32(sc->sc_cmd + GDT_CMD_COMMANDINDEX,
1132 ccb->gc_cmd_index);
1133 gdt_enc16(sc->sc_cmd + GDT_CMD_OPCODE, opcode);
1134 gdt_enc32(sc->sc_cmd + GDT_CMD_BOARDNODE, GDT_LOCALBOARD);
1135
1136 switch (service) {
1137 case GDT_CACHESERVICE:
1138 if (opcode == GDT_IOCTL) {
1139 gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1140 GDT_IOCTL_SUBFUNC, arg1);
1141 gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1142 GDT_IOCTL_CHANNEL, arg2);
1143 gdt_enc16(sc->sc_cmd + GDT_CMD_UNION +
1144 GDT_IOCTL_PARAM_SIZE, (u_int16_t)arg3);
1145 gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1146 GDT_IOCTL_P_PARAM,
1147 sc->sc_scratch_seg.ds_addr);
1148 } else {
1149 gdt_enc16(sc->sc_cmd + GDT_CMD_UNION +
1150 GDT_CACHE_DEVICENO, (u_int16_t)arg1);
1151 gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1152 GDT_CACHE_BLOCKNO, arg2);
1153 }
1154 break;
1155
1156 case GDT_SCSIRAWSERVICE:
1157 gdt_enc32(sc->sc_cmd + GDT_CMD_UNION +
1158 GDT_RAW_DIRECTION, arg1);
1159 sc->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] =
1160 (u_int8_t)arg2;
1161 sc->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] =
1162 (u_int8_t)arg3;
1163 sc->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] =
1164 (u_int8_t)(arg3 >> 8);
1165 }
1166
1167 sc->sc_cmd_len = GDT_CMD_SZ;
1168 sc->sc_cmd_off = 0;
1169 sc->sc_cmd_cnt = 0;
1170 sc->sc_copy_cmd(sc, ccb);
1171 sc->sc_release_event(sc, ccb);
1172 DELAY(20);
1173
1174 rslt = gdt_wait(sc, ccb, GDT_POLL_TIMEOUT);
1175 scsi_io_put(&sc->sc_iopool, ccb);
1176
1177 if (!rslt)
1178 return (0);
1179 if (sc->sc_status != GDT_S_BSY || --retries == 0)
1180 break;
1181 DELAY(1);
1182 }
1183 return (sc->sc_status == GDT_S_OK);
1184 }
1185
1186 void *
gdt_ccb_alloc(void * xsc)1187 gdt_ccb_alloc(void *xsc)
1188 {
1189 struct gdt_softc *sc = xsc;
1190 struct gdt_ccb *ccb;
1191
1192 GDT_DPRINTF(GDT_D_QUEUE, ("gdt_ccb_alloc(%p) ", sc));
1193
1194 mtx_enter(&sc->sc_ccb_mtx);
1195 ccb = TAILQ_FIRST(&sc->sc_free_ccb);
1196 if (ccb != NULL)
1197 TAILQ_REMOVE(&sc->sc_free_ccb, ccb, gc_chain);
1198 mtx_leave(&sc->sc_ccb_mtx);
1199
1200 return (ccb);
1201 }
1202
1203 void
gdt_ccb_free(void * xsc,void * xccb)1204 gdt_ccb_free(void *xsc, void *xccb)
1205 {
1206 struct gdt_softc *sc = xsc;
1207 struct gdt_ccb *ccb = xccb;
1208 int wake = 0;
1209
1210 GDT_DPRINTF(GDT_D_QUEUE, ("gdt_ccb_free(%p, %p) ", sc, ccb));
1211
1212 mtx_enter(&sc->sc_ccb_mtx);
1213 TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, gc_chain);
1214 /* If the free list was empty, wake up potential waiters. */
1215 if (TAILQ_NEXT(ccb, gc_chain) == NULL)
1216 wake = 1;
1217 mtx_leave(&sc->sc_ccb_mtx);
1218
1219 if (wake)
1220 wakeup(&sc->sc_free_ccb);
1221 }
1222
1223 void
gdt_enqueue_ccb(struct gdt_softc * sc,struct gdt_ccb * ccb)1224 gdt_enqueue_ccb(struct gdt_softc *sc, struct gdt_ccb *ccb)
1225 {
1226 GDT_DPRINTF(GDT_D_QUEUE, ("gdt_enqueue_ccb(%p, %p) ", sc, ccb));
1227
1228 timeout_set(&ccb->gc_xs->stimeout, gdt_timeout, ccb);
1229 TAILQ_INSERT_TAIL(&sc->sc_ccbq, ccb, gc_chain);
1230 gdt_start_ccbs(sc);
1231 }
1232
1233 void
gdt_start_ccbs(struct gdt_softc * sc)1234 gdt_start_ccbs(struct gdt_softc *sc)
1235 {
1236 struct gdt_ccb *ccb;
1237 struct scsi_xfer *xs;
1238
1239 GDT_DPRINTF(GDT_D_QUEUE, ("gdt_start_ccbs(%p) ", sc));
1240
1241 while ((ccb = TAILQ_FIRST(&sc->sc_ccbq)) != NULL) {
1242
1243 xs = ccb->gc_xs;
1244 if (ccb->gc_flags & GDT_GCF_WATCHDOG)
1245 timeout_del(&xs->stimeout);
1246
1247 if (gdt_exec_ccb(ccb) == 0) {
1248 ccb->gc_flags |= GDT_GCF_WATCHDOG;
1249 timeout_set(&ccb->gc_xs->stimeout, gdt_watchdog, ccb);
1250 timeout_add_msec(&xs->stimeout, GDT_WATCH_TIMEOUT);
1251 break;
1252 }
1253 TAILQ_REMOVE(&sc->sc_ccbq, ccb, gc_chain);
1254
1255 if ((xs->flags & SCSI_POLL) == 0) {
1256 timeout_set(&ccb->gc_xs->stimeout, gdt_timeout, ccb);
1257 timeout_add_msec(&xs->stimeout, ccb->gc_timeout);
1258 }
1259 }
1260 }
1261
1262 void
gdt_chain(struct gdt_softc * sc)1263 gdt_chain(struct gdt_softc *sc)
1264 {
1265 GDT_DPRINTF(GDT_D_INTR, ("gdt_chain(%p) ", sc));
1266
1267 if (!SIMPLEQ_EMPTY(&sc->sc_queue))
1268 gdt_scsi_cmd(SIMPLEQ_FIRST(&sc->sc_queue));
1269 }
1270
1271 void
gdt_timeout(void * arg)1272 gdt_timeout(void *arg)
1273 {
1274 struct gdt_ccb *ccb = arg;
1275 struct scsi_link *link = ccb->gc_xs->sc_link;
1276 struct gdt_softc *sc = link->bus->sb_adapter_softc;
1277 int s;
1278
1279 sc_print_addr(link);
1280 printf("timed out\n");
1281
1282 /* XXX Test for multiple timeouts */
1283
1284 ccb->gc_xs->error = XS_TIMEOUT;
1285 s = splbio();
1286 gdt_enqueue_ccb(sc, ccb);
1287 splx(s);
1288 }
1289
1290 void
gdt_watchdog(void * arg)1291 gdt_watchdog(void *arg)
1292 {
1293 struct gdt_ccb *ccb = arg;
1294 struct scsi_link *link = ccb->gc_xs->sc_link;
1295 struct gdt_softc *sc = link->bus->sb_adapter_softc;
1296 int s;
1297
1298 s = splbio();
1299 ccb->gc_flags &= ~GDT_GCF_WATCHDOG;
1300 gdt_start_ccbs(sc);
1301 splx(s);
1302 }
1303
1304 #if NBIO > 0
1305 int
gdt_ioctl(struct device * dev,u_long cmd,caddr_t addr)1306 gdt_ioctl(struct device *dev, u_long cmd, caddr_t addr)
1307 {
1308 struct gdt_softc *sc = (struct gdt_softc *)dev;
1309 int error = 0;
1310
1311 GDT_DPRINTF(GDT_D_IOCTL, ("%s: ioctl ", DEVNAME(sc)));
1312
1313 switch (cmd) {
1314 case BIOCINQ:
1315 GDT_DPRINTF(GDT_D_IOCTL, ("inq "));
1316 error = gdt_ioctl_inq(sc, (struct bioc_inq *)addr);
1317 break;
1318
1319 case BIOCVOL:
1320 GDT_DPRINTF(GDT_D_IOCTL, ("vol "));
1321 error = gdt_ioctl_vol(sc, (struct bioc_vol *)addr);
1322 break;
1323
1324 case BIOCDISK:
1325 GDT_DPRINTF(GDT_D_IOCTL, ("disk "));
1326 error = gdt_ioctl_disk(sc, (struct bioc_disk *)addr);
1327 break;
1328
1329 case BIOCALARM:
1330 GDT_DPRINTF(GDT_D_IOCTL, ("alarm "));
1331 error = gdt_ioctl_alarm(sc, (struct bioc_alarm *)addr);
1332 break;
1333
1334 case BIOCSETSTATE:
1335 GDT_DPRINTF(GDT_D_IOCTL, ("setstate "));
1336 error = gdt_ioctl_setstate(sc, (struct bioc_setstate *)addr);
1337 break;
1338
1339 default:
1340 GDT_DPRINTF(GDT_D_IOCTL, (" invalid ioctl\n"));
1341 error = ENOTTY;
1342 }
1343
1344 return (error);
1345 }
1346
1347 int
gdt_ioctl_inq(struct gdt_softc * sc,struct bioc_inq * bi)1348 gdt_ioctl_inq(struct gdt_softc *sc, struct bioc_inq *bi)
1349 {
1350 bi->bi_novol = sc->sc_ndevs;
1351 bi->bi_nodisk = sc->sc_total_disks;
1352
1353 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
1354
1355 return (0);
1356 }
1357
/* BIOCVOL: unimplemented stub; always reports failure. */
int
gdt_ioctl_vol(struct gdt_softc *sc, struct bioc_vol *bv)
{
	return (1); /* XXX not yet */
}
1363
/* BIOCDISK: unimplemented stub; always reports failure. */
int
gdt_ioctl_disk(struct gdt_softc *sc, struct bioc_disk *bd)
{
	return (1); /* XXX not yet */
}
1369
/* BIOCALARM: unimplemented stub; always reports failure. */
int
gdt_ioctl_alarm(struct gdt_softc *sc, struct bioc_alarm *ba)
{
	return (1); /* XXX not yet */
}
1375
/* BIOCSETSTATE: unimplemented stub; always reports failure. */
int
gdt_ioctl_setstate(struct gdt_softc *sc, struct bioc_setstate *bs)
{
	return (1); /* XXX not yet */
}
1381 #endif /* NBIO > 0 */
1382