1 /* $OpenBSD: gdt_common.c,v 1.57 2011/04/19 23:59:11 krw Exp $ */ 2 3 /* 4 * Copyright (c) 1999, 2000, 2003 Niklas Hallqvist. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 /* 28 * This driver would not have written if it was not for the hardware donations 29 * from both ICP-Vortex and �ko.neT. I want to thank them for their support. 30 */ 31 32 #include <sys/param.h> 33 #include <sys/buf.h> 34 #include <sys/device.h> 35 #include <sys/ioctl.h> 36 #include <sys/kernel.h> 37 #include <sys/malloc.h> 38 #include <sys/systm.h> 39 40 #include <machine/bus.h> 41 42 #include <uvm/uvm_extern.h> 43 44 #include <scsi/scsi_all.h> 45 #include <scsi/scsi_disk.h> 46 #include <scsi/scsiconf.h> 47 48 #include <dev/biovar.h> 49 #include <dev/ic/gdtreg.h> 50 #include <dev/ic/gdtvar.h> 51 52 #include "bio.h" 53 54 #ifdef GDT_DEBUG 55 int gdt_maxcmds = GDT_MAXCMDS; 56 #undef GDT_MAXCMDS 57 #define GDT_MAXCMDS gdt_maxcmds 58 #endif 59 60 #define GDT_DRIVER_VERSION 1 61 #define GDT_DRIVER_SUBVERSION 2 62 63 int gdt_async_event(struct gdt_softc *, int); 64 void gdt_chain(struct gdt_softc *); 65 void gdt_clear_events(struct gdt_softc *); 66 void gdt_copy_internal_data(struct scsi_xfer *, u_int8_t *, size_t); 67 struct scsi_xfer *gdt_dequeue(struct gdt_softc *); 68 void gdt_enqueue(struct gdt_softc *, struct scsi_xfer *, int); 69 void gdt_enqueue_ccb(struct gdt_softc *, struct gdt_ccb *); 70 void gdt_eval_mapping(u_int32_t, int *, int *, int *); 71 int gdt_exec_ccb(struct gdt_ccb *); 72 void gdt_ccb_free(void *, void *); 73 void *gdt_ccb_alloc(void *); 74 void gdt_internal_cache_cmd(struct scsi_xfer *); 75 int gdt_internal_cmd(struct gdt_softc *, u_int8_t, u_int16_t, 76 u_int32_t, u_int32_t, u_int32_t); 77 #if NBIO > 0 78 int gdt_ioctl(struct device *, u_long, caddr_t); 79 int gdt_ioctl_inq(struct gdt_softc *, struct bioc_inq *); 80 int gdt_ioctl_vol(struct gdt_softc *, struct bioc_vol *); 81 int gdt_ioctl_disk(struct gdt_softc *, struct bioc_disk *); 82 int gdt_ioctl_alarm(struct gdt_softc *, struct bioc_alarm *); 83 int gdt_ioctl_setstate(struct gdt_softc *, struct bioc_setstate *); 84 #endif /* NBIO > 0 */ 85 void gdt_scsi_cmd(struct scsi_xfer *); 86 void gdt_start_ccbs(struct gdt_softc *); 87 int gdt_sync_event(struct gdt_softc *, int, u_int8_t, 88 struct scsi_xfer *); 89 void gdt_timeout(void *); 90 int gdt_wait(struct gdt_softc *, struct gdt_ccb *, int); 91 void gdt_watchdog(void *); 92 93 struct cfdriver gdt_cd = { 94 NULL, "gdt", DV_DULL 95 }; 96 97 struct scsi_adapter gdt_switch = { 98 gdt_scsi_cmd, gdtminphys, 0, 0, 99 }; 100 101 int gdt_cnt = 0; 102 u_int8_t gdt_polling; 103 u_int8_t gdt_from_wait; 104 struct gdt_softc *gdt_wait_gdt; 105 int gdt_wait_index; 106 #ifdef GDT_DEBUG 107 int gdt_debug = GDT_DEBUG; 108 #endif 109 110 int 111 gdt_attach(struct gdt_softc *sc) 112 { 113 struct scsibus_attach_args saa; 114 u_int16_t cdev_cnt; 115 int i, id, drv_cyls, drv_hds, drv_secs, error, nsegs; 116 117 gdt_polling = 1; 118 gdt_from_wait = 0; 119 120 if (bus_dmamem_alloc(sc->sc_dmat, GDT_SCRATCH_SZ, PAGE_SIZE, 0, 121 &sc->sc_scratch_seg, 1, &nsegs, BUS_DMA_NOWAIT)) 122 panic("%s: bus_dmamem_alloc failed", DEVNAME(sc)); 123 if (bus_dmamem_map(sc->sc_dmat, &sc->sc_scratch_seg, 1, 124 GDT_SCRATCH_SZ, &sc->sc_scratch, BUS_DMA_NOWAIT)) 125 panic("%s: bus_dmamem_map failed", DEVNAME(sc)); 126 127 gdt_clear_events(sc); 128 129 TAILQ_INIT(&sc->sc_free_ccb); 130 TAILQ_INIT(&sc->sc_ccbq); 131 TAILQ_INIT(&sc->sc_ucmdq); 132 LIST_INIT(&sc->sc_queue); 133 134 mtx_init(&sc->sc_ccb_mtx, IPL_BIO); 135 scsi_iopool_init(&sc->sc_iopool, sc, gdt_ccb_alloc, gdt_ccb_free); 136 137 /* Initialize the ccbs */ 138 for (i = 0; i < GDT_MAXCMDS; i++) { 139 sc->sc_ccbs[i].gc_cmd_index = i + 2; 140 error = bus_dmamap_create(sc->sc_dmat, 141 (GDT_MAXOFFSETS - 1) << PGSHIFT, GDT_MAXOFFSETS, 142 (GDT_MAXOFFSETS - 1) << PGSHIFT, 0, 143 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 144 &sc->sc_ccbs[i].gc_dmamap_xfer); 145 if (error) { 146 printf("%s: cannot create ccb dmamap (%d)", 147 DEVNAME(sc), error); 148 return (1); 149 } 150 (void)gdt_ccb_set_cmd(sc->sc_ccbs + i, GDT_GCF_UNUSED); 151 TAILQ_INSERT_TAIL(&sc->sc_free_ccb, &sc->sc_ccbs[i], 152 gc_chain); 153 } 154 155 /* Fill in the prototype scsi_link. */ 156 sc->sc_link.adapter_softc = sc; 157 sc->sc_link.adapter = &gdt_switch; 158 /* openings will be filled in later. */ 159 sc->sc_link.adapter_buswidth = 160 (sc->sc_class & GDT_FC) ? GDT_MAXID : GDT_MAX_HDRIVES; 161 sc->sc_link.adapter_target = sc->sc_link.adapter_buswidth; 162 sc->sc_link.pool = &sc->sc_iopool; 163 164 if (!gdt_internal_cmd(sc, GDT_SCREENSERVICE, GDT_INIT, 0, 0, 0)) { 165 printf("screen service initialization error %d\n", 166 sc->sc_status); 167 return (1); 168 } 169 170 if (!gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_INIT, GDT_LINUX_OS, 0, 171 0)) { 172 printf("cache service initialization error %d\n", 173 sc->sc_status); 174 return (1); 175 } 176 177 cdev_cnt = (u_int16_t)sc->sc_info; 178 179 /* Detect number of busses */ 180 gdt_enc32(sc->sc_scratch + GDT_IOC_VERSION, GDT_IOC_NEWEST); 181 sc->sc_scratch[GDT_IOC_LIST_ENTRIES] = GDT_MAXBUS; 182 sc->sc_scratch[GDT_IOC_FIRST_CHAN] = 0; 183 sc->sc_scratch[GDT_IOC_LAST_CHAN] = GDT_MAXBUS - 1; 184 gdt_enc32(sc->sc_scratch + GDT_IOC_LIST_OFFSET, GDT_IOC_HDR_SZ); 185 if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL, 186 GDT_IOCHAN_RAW_DESC, GDT_INVALID_CHANNEL, 187 GDT_IOC_HDR_SZ + GDT_RAWIOC_SZ)) { 188 sc->sc_bus_cnt = sc->sc_scratch[GDT_IOC_CHAN_COUNT]; 189 for (i = 0; i < sc->sc_bus_cnt; i++) { 190 id = sc->sc_scratch[GDT_IOC_HDR_SZ + 191 i * GDT_RAWIOC_SZ + GDT_RAWIOC_PROC_ID]; 192 sc->sc_bus_id[id] = id < GDT_MAXBUS ? id : 0xff; 193 } 194 195 } else { 196 /* New method failed, use fallback. */ 197 gdt_enc32(sc->sc_scratch + GDT_GETCH_CHANNEL_NO, i); 198 for (i = 0; i < GDT_MAXBUS; i++) { 199 if (!gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL, 200 GDT_SCSI_CHAN_CNT | GDT_L_CTRL_PATTERN, 201 GDT_IO_CHANNEL | GDT_INVALID_CHANNEL, 202 GDT_GETCH_SZ)) { 203 if (i == 0) { 204 printf("cannot get channel count, " 205 "error %d\n", sc->sc_status); 206 return (1); 207 } 208 break; 209 } 210 sc->sc_bus_id[i] = 211 (sc->sc_scratch[GDT_GETCH_SIOP_ID] < GDT_MAXID) ? 212 sc->sc_scratch[GDT_GETCH_SIOP_ID] : 0xff; 213 } 214 sc->sc_bus_cnt = i; 215 } 216 217 /* Read cache configuration */ 218 if (!gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL, GDT_CACHE_INFO, 219 GDT_INVALID_CHANNEL, GDT_CINFO_SZ)) { 220 printf("cannot get cache info, error %d\n", sc->sc_status); 221 return (1); 222 } 223 sc->sc_cpar.cp_version = 224 gdt_dec32(sc->sc_scratch + GDT_CPAR_VERSION); 225 sc->sc_cpar.cp_state = gdt_dec16(sc->sc_scratch + GDT_CPAR_STATE); 226 sc->sc_cpar.cp_strategy = 227 gdt_dec16(sc->sc_scratch + GDT_CPAR_STRATEGY); 228 sc->sc_cpar.cp_write_back = 229 gdt_dec16(sc->sc_scratch + GDT_CPAR_WRITE_BACK); 230 sc->sc_cpar.cp_block_size = 231 gdt_dec16(sc->sc_scratch + GDT_CPAR_BLOCK_SIZE); 232 233 /* Read board information and features */ 234 sc->sc_more_proc = 0; 235 if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL, GDT_BOARD_INFO, 236 GDT_INVALID_CHANNEL, GDT_BINFO_SZ)) { 237 /* XXX A lot of these assignments can probably go later */ 238 sc->sc_binfo.bi_ser_no = 239 gdt_dec32(sc->sc_scratch + GDT_BINFO_SER_NO); 240 bcopy(sc->sc_scratch + GDT_BINFO_OEM_ID, 241 sc->sc_binfo.bi_oem_id, sizeof sc->sc_binfo.bi_oem_id); 242 sc->sc_binfo.bi_ep_flags = 243 gdt_dec16(sc->sc_scratch + GDT_BINFO_EP_FLAGS); 244 sc->sc_binfo.bi_proc_id = 245 gdt_dec32(sc->sc_scratch + GDT_BINFO_PROC_ID); 246 sc->sc_binfo.bi_memsize = 247 gdt_dec32(sc->sc_scratch + GDT_BINFO_MEMSIZE); 248 sc->sc_binfo.bi_mem_banks = 249 sc->sc_scratch[GDT_BINFO_MEM_BANKS]; 250 sc->sc_binfo.bi_chan_type = 251 sc->sc_scratch[GDT_BINFO_CHAN_TYPE]; 252 sc->sc_binfo.bi_chan_count = 253 sc->sc_scratch[GDT_BINFO_CHAN_COUNT]; 254 sc->sc_binfo.bi_rdongle_pres = 255 sc->sc_scratch[GDT_BINFO_RDONGLE_PRES]; 256 sc->sc_binfo.bi_epr_fw_ver = 257 gdt_dec32(sc->sc_scratch + GDT_BINFO_EPR_FW_VER); 258 sc->sc_binfo.bi_upd_fw_ver = 259 gdt_dec32(sc->sc_scratch + GDT_BINFO_UPD_FW_VER); 260 sc->sc_binfo.bi_upd_revision = 261 gdt_dec32(sc->sc_scratch + GDT_BINFO_UPD_REVISION); 262 bcopy(sc->sc_scratch + GDT_BINFO_TYPE_STRING, 263 sc->sc_binfo.bi_type_string, 264 sizeof sc->sc_binfo.bi_type_string); 265 bcopy(sc->sc_scratch + GDT_BINFO_RAID_STRING, 266 sc->sc_binfo.bi_raid_string, 267 sizeof sc->sc_binfo.bi_raid_string); 268 sc->sc_binfo.bi_update_pres = 269 sc->sc_scratch[GDT_BINFO_UPDATE_PRES]; 270 sc->sc_binfo.bi_xor_pres = 271 sc->sc_scratch[GDT_BINFO_XOR_PRES]; 272 sc->sc_binfo.bi_prom_type = 273 sc->sc_scratch[GDT_BINFO_PROM_TYPE]; 274 sc->sc_binfo.bi_prom_count = 275 sc->sc_scratch[GDT_BINFO_PROM_COUNT]; 276 sc->sc_binfo.bi_dup_pres = 277 gdt_dec32(sc->sc_scratch + GDT_BINFO_DUP_PRES); 278 sc->sc_binfo.bi_chan_pres = 279 gdt_dec32(sc->sc_scratch + GDT_BINFO_CHAN_PRES); 280 sc->sc_binfo.bi_mem_pres = 281 gdt_dec32(sc->sc_scratch + GDT_BINFO_MEM_PRES); 282 sc->sc_binfo.bi_ft_bus_system = 283 sc->sc_scratch[GDT_BINFO_FT_BUS_SYSTEM]; 284 sc->sc_binfo.bi_subtype_valid = 285 sc->sc_scratch[GDT_BINFO_SUBTYPE_VALID]; 286 sc->sc_binfo.bi_board_subtype = 287 sc->sc_scratch[GDT_BINFO_BOARD_SUBTYPE]; 288 sc->sc_binfo.bi_rampar_pres = 289 sc->sc_scratch[GDT_BINFO_RAMPAR_PRES]; 290 291 if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL, 292 GDT_BOARD_FEATURES, GDT_INVALID_CHANNEL, GDT_BFEAT_SZ)) { 293 sc->sc_bfeat.bf_chaining = 294 sc->sc_scratch[GDT_BFEAT_CHAINING]; 295 sc->sc_bfeat.bf_striping = 296 sc->sc_scratch[GDT_BFEAT_STRIPING]; 297 sc->sc_bfeat.bf_mirroring = 298 sc->sc_scratch[GDT_BFEAT_MIRRORING]; 299 sc->sc_bfeat.bf_raid = 300 sc->sc_scratch[GDT_BFEAT_RAID]; 301 sc->sc_more_proc = 1; 302 } 303 } else { 304 /* XXX Not implemented yet */ 305 } 306 307 /* Read more information */ 308 if (sc->sc_more_proc) { 309 int bus, j; 310 /* physical drives, channel addresses */ 311 /* step 1: get magical bus number from firmware */ 312 gdt_enc32(sc->sc_scratch + GDT_IOC_VERSION, GDT_IOC_NEWEST); 313 sc->sc_scratch[GDT_IOC_LIST_ENTRIES] = GDT_MAXBUS; 314 sc->sc_scratch[GDT_IOC_FIRST_CHAN] = 0; 315 sc->sc_scratch[GDT_IOC_LAST_CHAN] = GDT_MAXBUS - 1; 316 gdt_enc32(sc->sc_scratch + GDT_IOC_LIST_OFFSET, GDT_IOC_HDR_SZ); 317 if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL, 318 GDT_IOCHAN_DESC, GDT_INVALID_CHANNEL, 319 GDT_IOC_HDR_SZ + GDT_IOC_SZ * GDT_MAXBUS)) { 320 GDT_DPRINTF(GDT_D_INFO, ("method 1\n")); 321 for (bus = 0; bus < sc->sc_bus_cnt; bus++) { 322 sc->sc_raw[bus].ra_address = 323 gdt_dec32(sc->sc_scratch + 324 GDT_IOC_HDR_SZ + 325 GDT_IOC_SZ * bus + 326 GDT_IOC_ADDRESS); 327 sc->sc_raw[bus].ra_local_no = 328 gdt_dec8(sc->sc_scratch + 329 GDT_IOC_HDR_SZ + 330 GDT_IOC_SZ * bus + 331 GDT_IOC_LOCAL_NO); 332 GDT_DPRINTF(GDT_D_INFO, ( 333 "bus: %d address: %x local: %x\n", 334 bus, 335 sc->sc_raw[bus].ra_address, 336 sc->sc_raw[bus].ra_local_no)); 337 } 338 } else { 339 GDT_DPRINTF(GDT_D_INFO, ("method 2\n")); 340 for (bus = 0; bus < sc->sc_bus_cnt; bus++) { 341 sc->sc_raw[bus].ra_address = GDT_IO_CHANNEL; 342 sc->sc_raw[bus].ra_local_no = bus; 343 GDT_DPRINTF(GDT_D_INFO, ( 344 "bus: %d address: %x local: %x\n", 345 bus, 346 sc->sc_raw[bus].ra_address, 347 sc->sc_raw[bus].ra_local_no)); 348 } 349 } 350 /* step 2: use magical bus number to get nr of phys disks */ 351 for (bus = 0; bus < sc->sc_bus_cnt; bus++) { 352 gdt_enc32(sc->sc_scratch + GDT_GETCH_CHANNEL_NO, 353 sc->sc_raw[bus].ra_local_no); 354 if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_IOCTL, 355 GDT_SCSI_CHAN_CNT | GDT_L_CTRL_PATTERN, 356 sc->sc_raw[bus].ra_address | GDT_INVALID_CHANNEL, 357 GDT_GETCH_SZ)) { 358 sc->sc_raw[bus].ra_phys_cnt = 359 gdt_dec32(sc->sc_scratch + 360 GDT_GETCH_DRIVE_CNT); 361 GDT_DPRINTF(GDT_D_INFO, ("chan: %d disks: %d\n", 362 bus, sc->sc_raw[bus].ra_phys_cnt)); 363 } 364 365 /* step 3: get scsi disk nr */ 366 if (sc->sc_raw[bus].ra_phys_cnt > 0) { 367 gdt_enc32(sc->sc_scratch + 368 GDT_GETSCSI_CHAN, 369 sc->sc_raw[bus].ra_local_no); 370 gdt_enc32(sc->sc_scratch + 371 GDT_GETSCSI_CNT, 372 sc->sc_raw[bus].ra_phys_cnt); 373 if (gdt_internal_cmd(sc, GDT_CACHESERVICE, 374 GDT_IOCTL, 375 GDT_SCSI_DR_LIST | GDT_L_CTRL_PATTERN, 376 sc->sc_raw[bus].ra_address | 377 GDT_INVALID_CHANNEL, 378 GDT_GETSCSI_SZ)) 379 for (j = 0; 380 j < sc->sc_raw[bus].ra_phys_cnt; 381 j++) { 382 sc->sc_raw[bus].ra_id_list[j] = 383 gdt_dec32(sc->sc_scratch + 384 GDT_GETSCSI_LIST + 385 GDT_GETSCSI_LIST_SZ * j); 386 GDT_DPRINTF(GDT_D_INFO, 387 (" diskid: %d\n", 388 sc->sc_raw[bus].ra_id_list[j])); 389 } 390 else 391 sc->sc_raw[bus].ra_phys_cnt = 0; 392 } 393 /* add found disks to grand total */ 394 sc->sc_total_disks += sc->sc_raw[bus].ra_phys_cnt; 395 } 396 } /* if (sc->sc_more_proc) */ 397 398 if (!gdt_internal_cmd(sc, GDT_SCSIRAWSERVICE, GDT_INIT, 0, 0, 0)) { 399 printf("raw service initialization error %d\n", 400 sc->sc_status); 401 return (1); 402 } 403 404 /* Set/get features raw service (scatter/gather) */ 405 sc->sc_raw_feat = 0; 406 if (gdt_internal_cmd(sc, GDT_SCSIRAWSERVICE, GDT_SET_FEAT, 407 GDT_SCATTER_GATHER, 0, 0)) 408 if (gdt_internal_cmd(sc, GDT_SCSIRAWSERVICE, GDT_GET_FEAT, 0, 409 0, 0)) 410 sc->sc_raw_feat = sc->sc_info; 411 412 /* Set/get features cache service (scatter/gather) */ 413 sc->sc_cache_feat = 0; 414 if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_SET_FEAT, 0, 415 GDT_SCATTER_GATHER, 0)) 416 if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_GET_FEAT, 0, 0, 417 0)) 418 sc->sc_cache_feat = sc->sc_info; 419 420 /* XXX Linux reserve drives here, potentially */ 421 422 sc->sc_ndevs = 0; 423 /* Scan for cache devices */ 424 for (i = 0; i < cdev_cnt && i < GDT_MAX_HDRIVES; i++) 425 if (gdt_internal_cmd(sc, GDT_CACHESERVICE, GDT_INFO, i, 0, 426 0)) { 427 sc->sc_hdr[i].hd_present = 1; 428 sc->sc_hdr[i].hd_size = sc->sc_info; 429 430 if (sc->sc_hdr[i].hd_size > 0) 431 sc->sc_ndevs++; 432 433 /* 434 * Evaluate mapping (sectors per head, heads per cyl) 435 */ 436 sc->sc_hdr[i].hd_size &= ~GDT_SECS32; 437 if (sc->sc_info2 == 0) 438 gdt_eval_mapping(sc->sc_hdr[i].hd_size, 439 &drv_cyls, &drv_hds, &drv_secs); 440 else { 441 drv_hds = sc->sc_info2 & 0xff; 442 drv_secs = (sc->sc_info2 >> 8) & 0xff; 443 drv_cyls = sc->sc_hdr[i].hd_size / drv_hds / 444 drv_secs; 445 } 446 sc->sc_hdr[i].hd_heads = drv_hds; 447 sc->sc_hdr[i].hd_secs = drv_secs; 448 /* Round the size */ 449 sc->sc_hdr[i].hd_size = drv_cyls * drv_hds * drv_secs; 450 451 if (gdt_internal_cmd(sc, GDT_CACHESERVICE, 452 GDT_DEVTYPE, i, 0, 0)) 453 sc->sc_hdr[i].hd_devtype = sc->sc_info; 454 } 455 456 if (sc->sc_ndevs == 0) 457 sc->sc_link.openings = 0; 458 else 459 sc->sc_link.openings = (GDT_MAXCMDS - GDT_CMD_RESERVE) / 460 sc->sc_ndevs; 461 462 printf("dpmem %llx %d-bus %d cache device%s\n", 463 (long long)sc->sc_dpmembase, 464 sc->sc_bus_cnt, cdev_cnt, cdev_cnt == 1 ? "" : "s"); 465 printf("%s: ver %x, cache %s, strategy %d, writeback %s, blksz %d\n", 466 DEVNAME(sc), sc->sc_cpar.cp_version, 467 sc->sc_cpar.cp_state ? "on" : "off", sc->sc_cpar.cp_strategy, 468 sc->sc_cpar.cp_write_back ? "on" : "off", 469 sc->sc_cpar.cp_block_size); 470 #if 1 471 printf("%s: raw feat %x cache feat %x\n", DEVNAME(sc), 472 sc->sc_raw_feat, sc->sc_cache_feat); 473 #endif 474 475 #if NBIO > 0 476 if (bio_register(&sc->sc_dev, gdt_ioctl) != 0) 477 panic("%s: controller registration failed", DEVNAME(sc)); 478 #endif 479 gdt_cnt++; 480 481 bzero(&saa, sizeof(saa)); 482 saa.saa_sc_link = &sc->sc_link; 483 484 config_found(&sc->sc_dev, &saa, scsiprint); 485 486 gdt_polling = 0; 487 return (0); 488 } 489 490 void 491 gdt_eval_mapping(u_int32_t size, int *cyls, int *heads, int *secs) 492 { 493 *cyls = size / GDT_HEADS / GDT_SECS; 494 if (*cyls < GDT_MAXCYLS) { 495 *heads = GDT_HEADS; 496 *secs = GDT_SECS; 497 } else { 498 /* Too high for 64 * 32 */ 499 *cyls = size / GDT_MEDHEADS / GDT_MEDSECS; 500 if (*cyls < GDT_MAXCYLS) { 501 *heads = GDT_MEDHEADS; 502 *secs = GDT_MEDSECS; 503 } else { 504 /* Too high for 127 * 63 */ 505 *cyls = size / GDT_BIGHEADS / GDT_BIGSECS; 506 *heads = GDT_BIGHEADS; 507 *secs = GDT_BIGSECS; 508 } 509 } 510 } 511 512 /* 513 * Insert a command into the driver queue, either at the front or at the tail. 514 * It's ok to overload the freelist link as these structures are never on 515 * the freelist at this time. 516 */ 517 void 518 gdt_enqueue(struct gdt_softc *sc, struct scsi_xfer *xs, int infront) 519 { 520 if (infront || LIST_FIRST(&sc->sc_queue) == NULL) { 521 if (LIST_FIRST(&sc->sc_queue) == NULL) 522 sc->sc_queuelast = xs; 523 LIST_INSERT_HEAD(&sc->sc_queue, xs, free_list); 524 return; 525 } 526 LIST_INSERT_AFTER(sc->sc_queuelast, xs, free_list); 527 sc->sc_queuelast = xs; 528 } 529 530 /* 531 * Pull a command off the front of the driver queue. 532 */ 533 struct scsi_xfer * 534 gdt_dequeue(struct gdt_softc *sc) 535 { 536 struct scsi_xfer *xs; 537 538 xs = LIST_FIRST(&sc->sc_queue); 539 if (xs == NULL) 540 return (NULL); 541 LIST_REMOVE(xs, free_list); 542 543 if (LIST_FIRST(&sc->sc_queue) == NULL) 544 sc->sc_queuelast = NULL; 545 546 return (xs); 547 } 548 549 /* 550 * Start a SCSI operation on a cache device. 551 * XXX Polled operation is not yet complete. What kind of locking do we need? 552 */ 553 void 554 gdt_scsi_cmd(struct scsi_xfer *xs) 555 { 556 struct scsi_link *link = xs->sc_link; 557 struct gdt_softc *sc = link->adapter_softc; 558 u_int8_t target = link->target; 559 struct gdt_ccb *ccb; 560 #if 0 561 struct gdt_ucmd *ucmd; 562 #endif 563 u_int32_t blockno, blockcnt; 564 struct scsi_rw *rw; 565 struct scsi_rw_big *rwb; 566 bus_dmamap_t xfer; 567 int error; 568 int s; 569 int polled; 570 571 GDT_DPRINTF(GDT_D_CMD, ("gdt_scsi_cmd ")); 572 573 s = splbio(); 574 575 xs->error = XS_NOERROR; 576 577 if (target >= GDT_MAX_HDRIVES || !sc->sc_hdr[target].hd_present || 578 link->lun != 0) { 579 /* 580 * XXX Should be XS_SENSE but that would require setting up a 581 * faked sense too. 582 */ 583 xs->error = XS_DRIVER_STUFFUP; 584 scsi_done(xs); 585 splx(s); 586 return; 587 } 588 589 /* Don't double enqueue if we came from gdt_chain. */ 590 if (xs != LIST_FIRST(&sc->sc_queue)) 591 gdt_enqueue(sc, xs, 0); 592 593 while ((xs = gdt_dequeue(sc)) != NULL) { 594 xs->error = XS_NOERROR; 595 ccb = NULL; 596 link = xs->sc_link; 597 target = link->target; 598 polled = ISSET(xs->flags, SCSI_POLL); 599 600 if (!gdt_polling && !(xs->flags & SCSI_POLL) && 601 sc->sc_test_busy(sc)) { 602 /* 603 * Put it back in front. XXX Should we instead 604 * set xs->error to XS_BUSY? 605 */ 606 gdt_enqueue(sc, xs, 1); 607 break; 608 } 609 610 switch (xs->cmd->opcode) { 611 case TEST_UNIT_READY: 612 case REQUEST_SENSE: 613 case INQUIRY: 614 case MODE_SENSE: 615 case START_STOP: 616 case READ_CAPACITY: 617 #if 0 618 case VERIFY: 619 #endif 620 gdt_internal_cache_cmd(xs); 621 scsi_done(xs); 622 goto ready; 623 624 case PREVENT_ALLOW: 625 GDT_DPRINTF(GDT_D_CMD, ("PREVENT/ALLOW ")); 626 /* XXX Not yet implemented */ 627 xs->error = XS_NOERROR; 628 scsi_done(xs); 629 goto ready; 630 631 default: 632 GDT_DPRINTF(GDT_D_CMD, 633 ("unknown opc %d ", xs->cmd->opcode)); 634 /* XXX Not yet implemented */ 635 xs->error = XS_DRIVER_STUFFUP; 636 scsi_done(xs); 637 goto ready; 638 639 case READ_COMMAND: 640 case READ_BIG: 641 case WRITE_COMMAND: 642 case WRITE_BIG: 643 case SYNCHRONIZE_CACHE: 644 /* 645 * A new command chain, start from the beginning. 646 */ 647 sc->sc_cmd_off = 0; 648 649 if (xs->cmd->opcode == SYNCHRONIZE_CACHE) { 650 blockno = blockcnt = 0; 651 } else { 652 /* A read or write operation. */ 653 if (xs->cmdlen == 6) { 654 rw = (struct scsi_rw *)xs->cmd; 655 blockno = _3btol(rw->addr) & 656 (SRW_TOPADDR << 16 | 0xffff); 657 blockcnt = 658 rw->length ? rw->length : 0x100; 659 } else { 660 rwb = (struct scsi_rw_big *)xs->cmd; 661 blockno = _4btol(rwb->addr); 662 blockcnt = _2btol(rwb->length); 663 } 664 if (blockno >= sc->sc_hdr[target].hd_size || 665 blockno + blockcnt > 666 sc->sc_hdr[target].hd_size) { 667 printf( 668 "%s: out of bounds %u-%u >= %u\n", 669 DEVNAME(sc), blockno, 670 blockcnt, 671 sc->sc_hdr[target].hd_size); 672 /* 673 * XXX Should be XS_SENSE but that 674 * would require setting up a faked 675 * sense too. 676 */ 677 xs->error = XS_DRIVER_STUFFUP; 678 scsi_done(xs); 679 goto ready; 680 } 681 } 682 683 ccb = xs->io; 684 ccb->gc_blockno = blockno; 685 ccb->gc_blockcnt = blockcnt; 686 ccb->gc_xs = xs; 687 ccb->gc_timeout = xs->timeout; 688 ccb->gc_service = GDT_CACHESERVICE; 689 ccb->gc_flags = 0; 690 gdt_ccb_set_cmd(ccb, GDT_GCF_SCSI); 691 692 if (xs->cmd->opcode != SYNCHRONIZE_CACHE) { 693 xfer = ccb->gc_dmamap_xfer; 694 error = bus_dmamap_load(sc->sc_dmat, xfer, 695 xs->data, xs->datalen, NULL, 696 (xs->flags & SCSI_NOSLEEP) ? 697 BUS_DMA_NOWAIT : BUS_DMA_WAITOK); 698 if (error) { 699 printf("%s: gdt_scsi_cmd: ", 700 DEVNAME(sc)); 701 if (error == EFBIG) 702 printf( 703 "more than %d dma segs\n", 704 GDT_MAXOFFSETS); 705 else 706 printf("error %d " 707 "loading dma map\n", 708 error); 709 710 xs->error = XS_DRIVER_STUFFUP; 711 scsi_done(xs); 712 goto ready; 713 } 714 bus_dmamap_sync(sc->sc_dmat, xfer, 0, 715 xfer->dm_mapsize, 716 (xs->flags & SCSI_DATA_IN) ? 717 BUS_DMASYNC_PREREAD : 718 BUS_DMASYNC_PREWRITE); 719 } 720 721 gdt_enqueue_ccb(sc, ccb); 722 /* XXX what if enqueue did not start a transfer? */ 723 if (gdt_polling || (xs->flags & SCSI_POLL)) { 724 if (!gdt_wait(sc, ccb, ccb->gc_timeout)) { 725 printf("%s: command %d timed out\n", 726 DEVNAME(sc), 727 ccb->gc_cmd_index); 728 xs->error = XS_TIMEOUT; 729 scsi_done(xs); 730 splx(s); 731 return; 732 } 733 } 734 } 735 736 ready: 737 /* 738 * Don't process the queue if we are polling. 739 */ 740 if (polled) { 741 break; 742 } 743 } 744 745 splx(s); 746 } 747 748 /* XXX Currently only for cacheservice, returns 0 if busy */ 749 int 750 gdt_exec_ccb(struct gdt_ccb *ccb) 751 { 752 struct scsi_xfer *xs = ccb->gc_xs; 753 struct scsi_link *link = xs->sc_link; 754 struct gdt_softc *sc = link->adapter_softc; 755 u_int8_t target = link->target; 756 u_int32_t sg_canz; 757 bus_dmamap_t xfer; 758 int i; 759 #if 1 /* XXX */ 760 static int __level = 0; 761 762 if (__level++ > 0) 763 panic("level > 0"); 764 #endif 765 GDT_DPRINTF(GDT_D_CMD, ("gdt_exec_ccb(%p, %p) ", xs, ccb)); 766 767 sc->sc_cmd_cnt = 0; 768 769 /* 770 * XXX Yeah I know it's an always-true condition, but that may change 771 * later. 772 */ 773 if (sc->sc_cmd_cnt == 0) 774 sc->sc_set_sema0(sc); 775 776 gdt_enc32(sc->sc_cmd + GDT_CMD_COMMANDINDEX, ccb->gc_cmd_index); 777 gdt_enc32(sc->sc_cmd + GDT_CMD_BOARDNODE, GDT_LOCALBOARD); 778 gdt_enc16(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DEVICENO, 779 target); 780 781 switch (xs->cmd->opcode) { 782 case PREVENT_ALLOW: 783 case SYNCHRONIZE_CACHE: 784 if (xs->cmd->opcode == PREVENT_ALLOW) { 785 /* XXX PREVENT_ALLOW support goes here */ 786 } else { 787 GDT_DPRINTF(GDT_D_CMD, 788 ("SYNCHRONIZE CACHE tgt %d ", target)); 789 sc->sc_cmd[GDT_CMD_OPCODE] = GDT_FLUSH; 790 } 791 gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO, 792 1); 793 sg_canz = 0; 794 break; 795 796 case WRITE_COMMAND: 797 case WRITE_BIG: 798 /* XXX WRITE_THR could be supported too */ 799 sc->sc_cmd[GDT_CMD_OPCODE] = GDT_WRITE; 800 break; 801 802 case READ_COMMAND: 803 case READ_BIG: 804 sc->sc_cmd[GDT_CMD_OPCODE] = GDT_READ; 805 break; 806 } 807 808 if (xs->cmd->opcode != PREVENT_ALLOW && 809 xs->cmd->opcode != SYNCHRONIZE_CACHE) { 810 gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKNO, 811 ccb->gc_blockno); 812 gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_BLOCKCNT, 813 ccb->gc_blockcnt); 814 815 xfer = ccb->gc_dmamap_xfer; 816 if (sc->sc_cache_feat & GDT_SCATTER_GATHER) { 817 gdt_enc32( 818 sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR, 819 0xffffffff); 820 for (i = 0; i < xfer->dm_nsegs; i++) { 821 gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + 822 GDT_CACHE_SG_LST + i * GDT_SG_SZ + 823 GDT_SG_PTR, 824 xfer->dm_segs[i].ds_addr); 825 gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + 826 GDT_CACHE_SG_LST + i * GDT_SG_SZ + 827 GDT_SG_LEN, 828 xfer->dm_segs[i].ds_len); 829 GDT_DPRINTF(GDT_D_IO, 830 ("#%d va %p pa %p len %x\n", i, buf, 831 xfer->dm_segs[i].ds_addr, 832 xfer->dm_segs[i].ds_len)); 833 } 834 sg_canz = xfer->dm_nsegs; 835 gdt_enc32( 836 sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_LST + 837 sg_canz * GDT_SG_SZ + GDT_SG_LEN, 0); 838 } else { 839 /* XXX Hardly correct */ 840 gdt_enc32( 841 sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_DESTADDR, 842 xfer->dm_segs[0].ds_addr); 843 sg_canz = 0; 844 } 845 } 846 gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + GDT_CACHE_SG_CANZ, sg_canz); 847 848 sc->sc_cmd_len = 849 roundup(GDT_CMD_UNION + GDT_CACHE_SG_LST + sg_canz * GDT_SG_SZ, 850 sizeof (u_int32_t)); 851 852 if (sc->sc_cmd_cnt > 0 && 853 sc->sc_cmd_off + sc->sc_cmd_len + GDT_DPMEM_COMMAND_OFFSET > 854 sc->sc_ic_all_size) { 855 printf("%s: DPMEM overflow\n", DEVNAME(sc)); 856 xs->error = XS_BUSY; 857 #if 1 /* XXX */ 858 __level--; 859 #endif 860 return (0); 861 } 862 863 sc->sc_copy_cmd(sc, ccb); 864 sc->sc_release_event(sc, ccb); 865 866 xs->error = XS_NOERROR; 867 xs->resid = 0; 868 #if 1 /* XXX */ 869 __level--; 870 #endif 871 return (1); 872 } 873 874 void 875 gdt_copy_internal_data(struct scsi_xfer *xs, u_int8_t *data, size_t size) 876 { 877 size_t copy_cnt; 878 879 GDT_DPRINTF(GDT_D_MISC, ("gdt_copy_internal_data ")); 880 881 if (!xs->datalen) 882 printf("uio move not yet supported\n"); 883 else { 884 copy_cnt = MIN(size, xs->datalen); 885 bcopy(data, xs->data, copy_cnt); 886 } 887 } 888 889 /* Emulated SCSI operation on cache device */ 890 void 891 gdt_internal_cache_cmd(struct scsi_xfer *xs) 892 { 893 struct scsi_link *link = xs->sc_link; 894 struct gdt_softc *sc = link->adapter_softc; 895 struct scsi_inquiry_data inq; 896 struct scsi_sense_data sd; 897 struct scsi_read_cap_data rcd; 898 u_int8_t target = link->target; 899 900 GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cache_cmd ")); 901 902 switch (xs->cmd->opcode) { 903 case TEST_UNIT_READY: 904 case START_STOP: 905 #if 0 906 case VERIFY: 907 #endif 908 GDT_DPRINTF(GDT_D_CMD, ("opc %d tgt %d ", xs->cmd->opcode, 909 target)); 910 break; 911 912 case REQUEST_SENSE: 913 GDT_DPRINTF(GDT_D_CMD, ("REQUEST SENSE tgt %d ", target)); 914 bzero(&sd, sizeof sd); 915 sd.error_code = SSD_ERRCODE_CURRENT; 916 sd.segment = 0; 917 sd.flags = SKEY_NO_SENSE; 918 gdt_enc32(sd.info, 0); 919 sd.extra_len = 0; 920 gdt_copy_internal_data(xs, (u_int8_t *)&sd, sizeof sd); 921 break; 922 923 case INQUIRY: 924 GDT_DPRINTF(GDT_D_CMD, ("INQUIRY tgt %d devtype %x ", target, 925 sc->sc_hdr[target].hd_devtype)); 926 bzero(&inq, sizeof inq); 927 inq.device = 928 (sc->sc_hdr[target].hd_devtype & 4) ? T_CDROM : T_DIRECT; 929 inq.dev_qual2 = 930 (sc->sc_hdr[target].hd_devtype & 1) ? SID_REMOVABLE : 0; 931 inq.version = 2; 932 inq.response_format = 2; 933 inq.additional_length = 32; 934 inq.flags |= SID_CmdQue; 935 strlcpy(inq.vendor, "ICP ", sizeof inq.vendor); 936 snprintf(inq.product, sizeof inq.product, "Host drive #%02d", 937 target); 938 strlcpy(inq.revision, " ", sizeof inq.revision); 939 gdt_copy_internal_data(xs, (u_int8_t *)&inq, sizeof inq); 940 break; 941 942 case READ_CAPACITY: 943 GDT_DPRINTF(GDT_D_CMD, ("READ CAPACITY tgt %d ", target)); 944 bzero(&rcd, sizeof rcd); 945 _lto4b(sc->sc_hdr[target].hd_size - 1, rcd.addr); 946 _lto4b(GDT_SECTOR_SIZE, rcd.length); 947 gdt_copy_internal_data(xs, (u_int8_t *)&rcd, sizeof rcd); 948 break; 949 950 default: 951 GDT_DPRINTF(GDT_D_CMD, ("unsupported scsi command %#x tgt %d ", 952 xs->cmd->opcode, target)); 953 xs->error = XS_DRIVER_STUFFUP; 954 return; 955 } 956 957 xs->error = XS_NOERROR; 958 } 959 960 void 961 gdt_clear_events(struct gdt_softc *sc) 962 { 963 GDT_DPRINTF(GDT_D_MISC, ("gdt_clear_events(%p) ", sc)); 964 965 /* XXX To be implemented */ 966 } 967 968 int 969 gdt_async_event(struct gdt_softc *sc, int service) 970 { 971 GDT_DPRINTF(GDT_D_INTR, ("gdt_async_event(%p, %d) ", sc, service)); 972 973 if (service == GDT_SCREENSERVICE) { 974 /* XXX To be implemented */ 975 } else { 976 /* XXX To be implemented */ 977 } 978 979 return (0); 980 } 981 982 int 983 gdt_sync_event(struct gdt_softc *sc, int service, u_int8_t index, 984 struct scsi_xfer *xs) 985 { 986 GDT_DPRINTF(GDT_D_INTR, 987 ("gdt_sync_event(%p, %d, %d, %p) ", sc, service, index, xs)); 988 989 if (service == GDT_SCREENSERVICE) { 990 GDT_DPRINTF(GDT_D_INTR, ("service == GDT_SCREENSERVICE ")); 991 /* XXX To be implemented */ 992 return (0); 993 } else { 994 switch (sc->sc_status) { 995 case GDT_S_OK: 996 GDT_DPRINTF(GDT_D_INTR, ("sc_status == GDT_S_OK ")); 997 /* XXX To be implemented */ 998 break; 999 case GDT_S_BSY: 1000 GDT_DPRINTF(GDT_D_INTR, ("sc_status == GDT_S_BSY ")); 1001 /* XXX To be implemented */ 1002 return (2); 1003 default: 1004 GDT_DPRINTF(GDT_D_INTR, ("sc_status is %d ", 1005 sc->sc_status)); 1006 /* XXX To be implemented */ 1007 return (0); 1008 } 1009 } 1010 1011 return (1); 1012 } 1013 1014 int 1015 gdt_intr(void *arg) 1016 { 1017 struct gdt_softc *sc = arg; 1018 struct gdt_intr_ctx ctx; 1019 int chain = 1; 1020 int sync_val = 0; 1021 struct scsi_xfer *xs = NULL; 1022 int prev_cmd; 1023 struct gdt_ccb *ccb; 1024 1025 GDT_DPRINTF(GDT_D_INTR, ("gdt_intr(%p) ", sc)); 1026 1027 /* If polling and we were not called from gdt_wait, just return */ 1028 if (gdt_polling && !gdt_from_wait) 1029 return (0); 1030 1031 ctx.istatus = sc->sc_get_status(sc); 1032 if (!ctx.istatus) { 1033 sc->sc_status = GDT_S_NO_STATUS; 1034 return (0); 1035 } 1036 1037 gdt_wait_index = 0; 1038 ctx.service = ctx.info2 = 0; 1039 1040 sc->sc_intr(sc, &ctx); 1041 1042 sc->sc_status = ctx.cmd_status; 1043 sc->sc_info = ctx.info; 1044 sc->sc_info2 = ctx.info2; 1045 1046 if (gdt_from_wait) { 1047 gdt_wait_gdt = sc; 1048 gdt_wait_index = ctx.istatus; 1049 } 1050 1051 switch (ctx.istatus) { 1052 case GDT_ASYNCINDEX: 1053 gdt_async_event(sc, ctx.service); 1054 goto finish; 1055 1056 case GDT_SPEZINDEX: 1057 printf("%s: uninitialized or unknown service (%d %d)\n", 1058 DEVNAME(sc), ctx.info, ctx.info2); 1059 chain = 0; 1060 goto finish; 1061 } 1062 1063 ccb = &sc->sc_ccbs[ctx.istatus - 2]; 1064 xs = ccb->gc_xs; 1065 if (!gdt_polling) 1066 timeout_del(&xs->stimeout); 1067 ctx.service = ccb->gc_service; 1068 prev_cmd = ccb->gc_flags & GDT_GCF_CMD_MASK; 1069 if (xs && xs->cmd->opcode != PREVENT_ALLOW && 1070 xs->cmd->opcode != SYNCHRONIZE_CACHE) { 1071 bus_dmamap_sync(sc->sc_dmat, ccb->gc_dmamap_xfer, 0, 1072 ccb->gc_dmamap_xfer->dm_mapsize, 1073 (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD : 1074 BUS_DMASYNC_POSTWRITE); 1075 bus_dmamap_unload(sc->sc_dmat, ccb->gc_dmamap_xfer); 1076 } 1077 switch (prev_cmd) { 1078 case GDT_GCF_UNUSED: 1079 /* XXX Not yet implemented */ 1080 chain = 0; 1081 goto finish; 1082 case GDT_GCF_INTERNAL: 1083 chain = 0; 1084 goto finish; 1085 } 1086 1087 sync_val = gdt_sync_event(sc, ctx.service, ctx.istatus, xs); 1088 1089 finish: 1090 switch (sync_val) { 1091 case 0: 1092 if (xs && gdt_from_wait) 1093 scsi_done(xs); 1094 break; 1095 case 1: 1096 scsi_done(xs); 1097 break; 1098 1099 case 2: 1100 gdt_enqueue(sc, xs, 0); 1101 } 1102 1103 if (chain) 1104 gdt_chain(sc); 1105 1106 return (1); 1107 } 1108 1109 void 1110 gdtminphys(struct buf *bp, struct scsi_link *sl) 1111 { 1112 GDT_DPRINTF(GDT_D_MISC, ("gdtminphys(0x%x) ", bp)); 1113 1114 /* As this is way more than MAXPHYS it's really not necessary. */ 1115 if ((GDT_MAXOFFSETS - 1) * PAGE_SIZE < MAXPHYS && 1116 bp->b_bcount > ((GDT_MAXOFFSETS - 1) * PAGE_SIZE)) 1117 bp->b_bcount = ((GDT_MAXOFFSETS - 1) * PAGE_SIZE); 1118 1119 minphys(bp); 1120 } 1121 1122 int 1123 gdt_wait(struct gdt_softc *sc, struct gdt_ccb *ccb, int timeout) 1124 { 1125 int s, rslt, rv = 0; 1126 1127 GDT_DPRINTF(GDT_D_MISC, 1128 ("gdt_wait(%p, %p, %d) ", sc, ccb, timeout)); 1129 1130 gdt_from_wait = 1; 1131 do { 1132 s = splbio(); 1133 rslt = gdt_intr(sc); 1134 splx(s); 1135 if (rslt && sc == gdt_wait_gdt && 1136 ccb->gc_cmd_index == gdt_wait_index) { 1137 rv = 1; 1138 break; 1139 } 1140 DELAY(1000); /* 1 millisecond */ 1141 } while (--timeout); 1142 gdt_from_wait = 0; 1143 1144 while (sc->sc_test_busy(sc)) 1145 DELAY(0); /* XXX correct? */ 1146 1147 return (rv); 1148 } 1149 1150 int 1151 gdt_internal_cmd(struct gdt_softc *sc, u_int8_t service, u_int16_t opcode, 1152 u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) 1153 { 1154 int retries, rslt; 1155 struct gdt_ccb *ccb; 1156 1157 GDT_DPRINTF(GDT_D_CMD, ("gdt_internal_cmd(%p, %d, %d, %d, %d, %d) ", 1158 sc, service, opcode, arg1, arg2, arg3)); 1159 1160 bzero(sc->sc_cmd, GDT_CMD_SZ); 1161 1162 for (retries = GDT_RETRIES; ; ) { 1163 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP); 1164 if (ccb == NULL) { 1165 printf("%s: no free command index found\n", 1166 DEVNAME(sc)); 1167 return (0); 1168 } 1169 ccb->gc_service = service; 1170 ccb->gc_xs = NULL; 1171 ccb->gc_blockno = ccb->gc_blockcnt = 0; 1172 ccb->gc_timeout = ccb->gc_flags = 0; 1173 ccb->gc_service = GDT_CACHESERVICE; 1174 gdt_ccb_set_cmd(ccb, GDT_GCF_INTERNAL); 1175 1176 sc->sc_set_sema0(sc); 1177 gdt_enc32(sc->sc_cmd + GDT_CMD_COMMANDINDEX, 1178 ccb->gc_cmd_index); 1179 gdt_enc16(sc->sc_cmd + GDT_CMD_OPCODE, opcode); 1180 gdt_enc32(sc->sc_cmd + GDT_CMD_BOARDNODE, GDT_LOCALBOARD); 1181 1182 switch (service) { 1183 case GDT_CACHESERVICE: 1184 if (opcode == GDT_IOCTL) { 1185 gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + 1186 GDT_IOCTL_SUBFUNC, arg1); 1187 gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + 1188 GDT_IOCTL_CHANNEL, arg2); 1189 gdt_enc16(sc->sc_cmd + GDT_CMD_UNION + 1190 GDT_IOCTL_PARAM_SIZE, (u_int16_t)arg3); 1191 gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + 1192 GDT_IOCTL_P_PARAM, 1193 sc->sc_scratch_seg.ds_addr); 1194 } else { 1195 gdt_enc16(sc->sc_cmd + GDT_CMD_UNION + 1196 GDT_CACHE_DEVICENO, (u_int16_t)arg1); 1197 gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + 1198 GDT_CACHE_BLOCKNO, arg2); 1199 } 1200 break; 1201 1202 case GDT_SCSIRAWSERVICE: 1203 gdt_enc32(sc->sc_cmd + GDT_CMD_UNION + 1204 GDT_RAW_DIRECTION, arg1); 1205 sc->sc_cmd[GDT_CMD_UNION + GDT_RAW_BUS] = 1206 (u_int8_t)arg2; 1207 sc->sc_cmd[GDT_CMD_UNION + GDT_RAW_TARGET] = 1208 (u_int8_t)arg3; 1209 sc->sc_cmd[GDT_CMD_UNION + GDT_RAW_LUN] = 1210 (u_int8_t)(arg3 >> 8); 1211 } 1212 1213 sc->sc_cmd_len = GDT_CMD_SZ; 1214 sc->sc_cmd_off = 0; 1215 sc->sc_cmd_cnt = 0; 1216 sc->sc_copy_cmd(sc, ccb); 1217 sc->sc_release_event(sc, ccb); 1218 DELAY(20); 1219 1220 rslt = gdt_wait(sc, ccb, GDT_POLL_TIMEOUT); 1221 scsi_io_put(&sc->sc_iopool, ccb); 1222 1223 if (!rslt) 1224 return (0); 1225 if (sc->sc_status != GDT_S_BSY || --retries == 0) 1226 break; 1227 DELAY(1); 1228 } 1229 return (sc->sc_status == GDT_S_OK); 1230 } 1231 1232 void * 1233 gdt_ccb_alloc(void *xsc) 1234 { 1235 struct gdt_softc *sc = xsc; 1236 struct gdt_ccb *ccb; 1237 1238 GDT_DPRINTF(GDT_D_QUEUE, ("gdt_ccb_alloc(%p) ", sc)); 1239 1240 mtx_enter(&sc->sc_ccb_mtx); 1241 ccb = TAILQ_FIRST(&sc->sc_free_ccb); 1242 if (ccb != NULL) 1243 TAILQ_REMOVE(&sc->sc_free_ccb, ccb, gc_chain); 1244 mtx_leave(&sc->sc_ccb_mtx); 1245 1246 return (ccb); 1247 } 1248 1249 void 1250 gdt_ccb_free(void *xsc, void *xccb) 1251 { 1252 struct gdt_softc *sc = xsc; 1253 struct gdt_ccb *ccb = xccb; 1254 int wake = 0; 1255 1256 GDT_DPRINTF(GDT_D_QUEUE, ("gdt_ccb_free(%p, %p) ", sc, ccb)); 1257 1258 mtx_enter(&sc->sc_ccb_mtx); 1259 TAILQ_INSERT_HEAD(&sc->sc_free_ccb, ccb, gc_chain); 1260 /* If the free list was empty, wake up potential waiters. */ 1261 if (TAILQ_NEXT(ccb, gc_chain) == NULL) 1262 wake = 1; 1263 mtx_leave(&sc->sc_ccb_mtx); 1264 1265 if (wake) 1266 wakeup(&sc->sc_free_ccb); 1267 } 1268 1269 void 1270 gdt_enqueue_ccb(struct gdt_softc *sc, struct gdt_ccb *ccb) 1271 { 1272 GDT_DPRINTF(GDT_D_QUEUE, ("gdt_enqueue_ccb(%p, %p) ", sc, ccb)); 1273 1274 timeout_set(&ccb->gc_xs->stimeout, gdt_timeout, ccb); 1275 TAILQ_INSERT_TAIL(&sc->sc_ccbq, ccb, gc_chain); 1276 gdt_start_ccbs(sc); 1277 } 1278 1279 void 1280 gdt_start_ccbs(struct gdt_softc *sc) 1281 { 1282 struct gdt_ccb *ccb; 1283 struct scsi_xfer *xs; 1284 1285 GDT_DPRINTF(GDT_D_QUEUE, ("gdt_start_ccbs(%p) ", sc)); 1286 1287 while ((ccb = TAILQ_FIRST(&sc->sc_ccbq)) != NULL) { 1288 1289 xs = ccb->gc_xs; 1290 if (ccb->gc_flags & GDT_GCF_WATCHDOG) 1291 timeout_del(&xs->stimeout); 1292 1293 if (gdt_exec_ccb(ccb) == 0) { 1294 ccb->gc_flags |= GDT_GCF_WATCHDOG; 1295 timeout_set(&ccb->gc_xs->stimeout, gdt_watchdog, ccb); 1296 timeout_add_msec(&xs->stimeout, GDT_WATCH_TIMEOUT); 1297 break; 1298 } 1299 TAILQ_REMOVE(&sc->sc_ccbq, ccb, gc_chain); 1300 1301 if ((xs->flags & SCSI_POLL) == 0) { 1302 timeout_set(&ccb->gc_xs->stimeout, gdt_timeout, ccb); 1303 timeout_add_msec(&xs->stimeout, ccb->gc_timeout); 1304 } 1305 } 1306 } 1307 1308 void 1309 gdt_chain(struct gdt_softc *sc) 1310 { 1311 GDT_DPRINTF(GDT_D_INTR, ("gdt_chain(%p) ", sc)); 1312 1313 if (LIST_FIRST(&sc->sc_queue)) 1314 gdt_scsi_cmd(LIST_FIRST(&sc->sc_queue)); 1315 } 1316 1317 void 1318 gdt_timeout(void *arg) 1319 { 1320 struct gdt_ccb *ccb = arg; 1321 struct scsi_link *link = ccb->gc_xs->sc_link; 1322 struct gdt_softc *sc = link->adapter_softc; 1323 int s; 1324 1325 sc_print_addr(link); 1326 printf("timed out\n"); 1327 1328 /* XXX Test for multiple timeouts */ 1329 1330 ccb->gc_xs->error = XS_TIMEOUT; 1331 s = splbio(); 1332 gdt_enqueue_ccb(sc, ccb); 1333 splx(s); 1334 } 1335 1336 void 1337 gdt_watchdog(void *arg) 1338 { 1339 struct gdt_ccb *ccb = arg; 1340 struct scsi_link *link = ccb->gc_xs->sc_link; 1341 struct gdt_softc *sc = link->adapter_softc; 1342 int s; 1343 1344 s = splbio(); 1345 ccb->gc_flags &= ~GDT_GCF_WATCHDOG; 1346 gdt_start_ccbs(sc); 1347 splx(s); 1348 } 1349 1350 #if NBIO > 0 1351 int 1352 gdt_ioctl(struct device *dev, u_long cmd, caddr_t addr) 1353 { 1354 struct gdt_softc *sc = (struct gdt_softc *)dev; 1355 int error = 0; 1356 1357 GDT_DPRINTF(GDT_D_IOCTL, ("%s: ioctl ", DEVNAME(sc))); 1358 1359 switch (cmd) { 1360 case BIOCINQ: 1361 GDT_DPRINTF(GDT_D_IOCTL, ("inq ")); 1362 error = gdt_ioctl_inq(sc, (struct bioc_inq *)addr); 1363 break; 1364 1365 case BIOCVOL: 1366 GDT_DPRINTF(GDT_D_IOCTL, ("vol ")); 1367 error = gdt_ioctl_vol(sc, (struct bioc_vol *)addr); 1368 break; 1369 1370 case BIOCDISK: 1371 GDT_DPRINTF(GDT_D_IOCTL, ("disk ")); 1372 error = gdt_ioctl_disk(sc, (struct bioc_disk *)addr); 1373 break; 1374 1375 case BIOCALARM: 1376 GDT_DPRINTF(GDT_D_IOCTL, ("alarm ")); 1377 error = gdt_ioctl_alarm(sc, (struct bioc_alarm *)addr); 1378 break; 1379 1380 case BIOCSETSTATE: 1381 GDT_DPRINTF(GDT_D_IOCTL, ("setstate ")); 1382 error = gdt_ioctl_setstate(sc, (struct bioc_setstate *)addr); 1383 break; 1384 1385 default: 1386 GDT_DPRINTF(GDT_D_IOCTL, (" invalid ioctl\n")); 1387 error = EINVAL; 1388 } 1389 1390 return (error); 1391 } 1392 1393 int 1394 gdt_ioctl_inq(struct gdt_softc *sc, struct bioc_inq *bi) 1395 { 1396 bi->bi_novol = sc->sc_ndevs; 1397 bi->bi_nodisk = sc->sc_total_disks; 1398 1399 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev)); 1400 1401 return (0); 1402 } 1403 1404 int 1405 gdt_ioctl_vol(struct gdt_softc *sc, struct bioc_vol *bv) 1406 { 1407 return (1); /* XXX not yet */ 1408 } 1409 1410 int 1411 gdt_ioctl_disk(struct gdt_softc *sc, struct bioc_disk *bd) 1412 { 1413 return (1); /* XXX not yet */ 1414 } 1415 1416 int 1417 gdt_ioctl_alarm(struct gdt_softc *sc, struct bioc_alarm *ba) 1418 { 1419 return (1); /* XXX not yet */ 1420 } 1421 1422 int 1423 gdt_ioctl_setstate(struct gdt_softc *sc, struct bioc_setstate *bs) 1424 { 1425 return (1); /* XXX not yet */ 1426 } 1427 1428 #if 0 1429 int 1430 gdt_ioctl(struct device *dev, u_long cmd, caddr_t addr) 1431 { 1432 int error = 0; 1433 struct gdt_dummy *dummy; 1434 1435 switch (cmd) { 1436 case GDT_IOCTL_DUMMY: 1437 dummy = (struct gdt_dummy *)addr; 1438 printf("%s: GDT_IOCTL_DUMMY %d\n", dev->dv_xname, dummy->x++); 1439 break; 1440 1441 case GDT_IOCTL_GENERAL: { 1442 gdt_ucmd_t *ucmd; 1443 struct gdt_softc *sc = (struct gdt_softc *)dev; 1444 int s; 1445 1446 ucmd = (gdt_ucmd_t *)addr; 1447 s = splbio(); 1448 TAILQ_INSERT_TAIL(&sc->sc_ucmdq, ucmd, links); 1449 ucmd->complete_flag = FALSE; 1450 splx(s); 1451 gdt_chain(sc); 1452 if (!ucmd->complete_flag) 1453 (void)tsleep((void *)ucmd, PCATCH | PRIBIO, "gdtucw", 1454 0); 1455 break; 1456 } 1457 1458 case GDT_IOCTL_DRVERS: 1459 ((gdt_drvers_t *)addr)->vers = 1460 (GDT_DRIVER_VERSION << 8) | GDT_DRIVER_SUBVERSION; 1461 break; 1462 1463 case GDT_IOCTL_CTRCNT: 1464 ((gdt_ctrcnt_t *)addr)->cnt = gdt_cnt; 1465 break; 1466 1467 #ifdef notyet 1468 case GDT_IOCTL_CTRTYPE: { 1469 gdt_ctrt_t *p; 1470 struct gdt_softc *sc = (struct gdt_softc *)dev; 1471 1472 p = (gdt_ctrt_t *)addr; 1473 p->oem_id = 0x8000; 1474 p->type = 0xfd; 1475 p->info = (sc->sc_bus << 8) | (sc->sc_slot << 3); 1476 p->ext_type = 0x6000 | sc->sc_subdevice; 1477 p->device_id = sc->sc_device; 1478 p->sub_device_id = sc->sc_subdevice; 1479 break; 1480 } 1481 #endif 1482 1483 case GDT_IOCTL_OSVERS: { 1484 gdt_osv_t *p; 1485 1486 p = (gdt_osv_t *)addr; 1487 p->oscode = 10; 1488 p->version = osrelease[0] - '0'; 1489 if (osrelease[1] == '.') 1490 p->subversion = osrelease[2] - '0'; 1491 else 1492 p->subversion = 0; 1493 if (osrelease[3] == '.') 1494 p->revision = osrelease[4] - '0'; 1495 else 1496 p->revision = 0; 1497 strlcpy(p->name, ostype, sizeof p->name); 1498 break; 1499 } 1500 1501 #ifdef notyet 1502 case GDT_IOCTL_EVENT: { 1503 gdt_event_t *p; 1504 int s; 1505 1506 p = (gdt_event_t *)addr; 1507 if (p->erase == 0xff) { 1508 if (p->dvr.event_source == GDT_ES_TEST) 1509 p->dvr.event_data.size = 1510 sizeof(p->dvr.event_data.eu.test); 1511 else if (p->dvr.event_source == GDT_ES_DRIVER) 1512 p->dvr.event_data.size = 1513 sizeof(p->dvr.event_data.eu.driver); 1514 else if (p->dvr.event_source == GDT_ES_SYNC) 1515 p->dvr.event_data.size = 1516 sizeof(p->dvr.event_data.eu.sync); 1517 else 1518 p->dvr.event_data.size = 1519 sizeof(p->dvr.event_data.eu.async); 1520 s = splbio(); 1521 gdt_store_event(p->dvr.event_source, p->dvr.event_idx, 1522 &p->dvr.event_data); 1523 splx(s); 1524 } else if (p->erase == 0xfe) { 1525 s = splbio(); 1526 gdt_clear_events(); 1527 splx(s); 1528 } else if (p->erase == 0) { 1529 p->handle = gdt_read_event(p->handle, &p->dvr); 1530 } else { 1531 gdt_readapp_event((u_int8_t)p->erase, &p->dvr); 1532 } 1533 break; 1534 } 1535 #endif 1536 1537 case GDT_IOCTL_STATIST: 1538 #if 0 1539 bcopy(&gdt_stat, (gdt_statist_t *)addr, sizeof gdt_stat); 1540 #else 1541 error = EOPNOTSUPP; 1542 #endif 1543 break; 1544 1545 default: 1546 error = EINVAL; 1547 } 1548 return (error); 1549 } 1550 #endif /* 0 */ 1551 #endif /* NBIO > 0 */ 1552