/*	$NetBSD: dpt.c,v 1.78 2021/11/10 15:21:43 msaitoh Exp $	*/

/*-
 * Copyright (c) 1997, 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran, Charles M. Hannum and by Jason R. Thorpe of the Numerical
 * Aerospace Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
 * Copyright (c) 2000 Adaptec Corporation
 * All rights reserved.
 *
 * TERMS AND CONDITIONS OF USE
 *
 * Redistribution and use in source form, with or without modification, are
 * permitted provided that redistributions of source code must retain the
 * above copyright notice, this list of conditions and the following disclaimer.
 *
 * This software is provided `as is' by Adaptec and any express or implied
 * warranties, including, but not limited to, the implied warranties of
 * merchantability and fitness for a particular purpose, are disclaimed. In no
 * event shall Adaptec be liable for any direct, indirect, incidental, special,
 * exemplary or consequential damages (including, but not limited to,
 * procurement of substitute goods or services; loss of use, data, or profits;
 * or business interruptions) however caused and on any theory of liability,
 * whether in contract, strict liability, or tort (including negligence or
 * otherwise) arising in any way out of the use of this driver software, even
 * if advised of the possibility of such damage.
 */

/*
 * Portions of this code fall under the following copyright:
 *
 * Originally written by Julian Elischer (julian@tfs.com)
 * for TRW Financial Systems for use under the MACH(2.5) operating system.
 *
 * TRW Financial Systems, in accordance with their agreement with Carnegie
 * Mellon University, makes this software available to CMU to distribute
 * or use in any manner that they see fit as long as this message is kept with
 * the software. For this reason TFS also grants any other persons or
 * organisations permission to use or modify this software.
 *
 * TFS supplies this software to be publicly redistributed
 * on the understanding that TFS is not responsible for the correct
 * functioning of this software in any circumstances.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: dpt.c,v 1.78 2021/11/10 15:21:43 msaitoh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/queue.h>
#include <sys/buf.h>
#include <sys/endian.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/proc.h>
#include <sys/mutex.h>

#include <sys/bus.h>
#ifdef i386
#include <machine/pio.h>
#include <machine/cputypes.h>
#endif

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsiconf.h>

#include <dev/ic/dptreg.h>
#include <dev/ic/dptvar.h>

#include <dev/i2o/dptivar.h>

#include "ioconf.h"

#ifdef DEBUG
#define	DPRINTF(x)	printf x
#else
#define	DPRINTF(x)
#endif

#define	dpt_inb(x, o)		\
    bus_space_read_1((x)->sc_iot, (x)->sc_ioh, (o))
#define	dpt_outb(x, o, d)	\
    bus_space_write_1((x)->sc_iot, (x)->sc_ioh, (o), (d))
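
/*
 * All register access to the HBA funnels through these two wrappers, which
 * issue single-byte bus_space reads/writes within the EATA register window
 * mapped by the bus attachment code.
 */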

static const char * const dpt_cname[] = {
	"3334", "SmartRAID IV",
	"3332", "SmartRAID IV",
	"2144", "SmartCache IV",
	"2044", "SmartCache IV",
	"2142", "SmartCache IV",
	"2042", "SmartCache IV",
	"2041", "SmartCache IV",
	"3224", "SmartRAID III",
	"3222", "SmartRAID III",
	"3021", "SmartRAID III",
	"2124", "SmartCache III",
	"2024", "SmartCache III",
	"2122", "SmartCache III",
	"2022", "SmartCache III",
	"2021", "SmartCache III",
	"2012", "SmartCache Plus",
	"2011", "SmartCache Plus",
	NULL, "<unknown>",
};
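
/*
 * dpt_init() walks this table two entries at a time, matching four model
 * digits from the EATA inquiry data against the first string of each pair;
 * the NULL sentinel maps anything unrecognized to "<unknown>".
 */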

static void	*dpt_sdh;

dev_type_open(dptopen);
dev_type_ioctl(dptioctl);

const struct cdevsw dpt_cdevsw = {
	.d_open = dptopen,
	.d_close = nullclose,
	.d_read = noread,
	.d_write = nowrite,
	.d_ioctl = dptioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER,
};

static struct dpt_sig dpt_sig = {
	{ 'd', 'P', 't', 'S', 'i', 'G'},
	SIG_VERSION,
#if defined(i386)
	PROC_INTEL,
#elif defined(powerpc)
	PROC_POWERPC,
#elif defined(alpha)
	PROC_ALPHA,
#elif defined(__mips__)
	PROC_MIPS,
#elif defined(sparc64)
	PROC_ULTRASPARC,
#else
	0xff,
#endif
#if defined(i386)
	PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#else
	0,
#endif
	FT_HBADRVR,
	0,
	OEM_DPT,
	OS_FREE_BSD,	/* XXX */
	CAP_ABOVE16MB,
	DEV_ALL,
	ADF_ALL_EATA,
	0,
	0,
	DPT_VERSION,
	DPT_REVISION,
	DPT_SUBREVISION,
	DPT_MONTH,
	DPT_DAY,
	DPT_YEAR,
	""		/* Will be filled later */
};
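
/*
 * This signature block is what the DPT_SIGNATURE ioctl hands back to
 * userland management tools; only the description string is computed at
 * run time (see dpt_init()).
 */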

static void	dpt_ccb_abort(struct dpt_softc *, struct dpt_ccb *);
static void	dpt_ccb_done(struct dpt_softc *, struct dpt_ccb *);
static int	dpt_ccb_map(struct dpt_softc *, struct dpt_ccb *);
static int	dpt_ccb_poll(struct dpt_softc *, struct dpt_ccb *);
static void	dpt_ccb_unmap(struct dpt_softc *, struct dpt_ccb *);
static int	dpt_cmd(struct dpt_softc *, struct dpt_ccb *, int, int);
static void	dpt_ctlrinfo(struct dpt_softc *, struct dpt_eata_ctlrinfo *);
static void	dpt_hba_inquire(struct dpt_softc *, struct eata_inquiry_data **);
static void	dpt_minphys(struct buf *);
static int	dpt_passthrough(struct dpt_softc *, struct eata_ucp *,
		    struct lwp *);
static void	dpt_scsipi_request(struct scsipi_channel *,
		    scsipi_adapter_req_t, void *);
static void	dpt_shutdown(void *);
static void	dpt_sysinfo(struct dpt_softc *, struct dpt_sysinfo *);
static int	dpt_wait(struct dpt_softc *, u_int8_t, u_int8_t, int);

static inline struct dpt_ccb	*dpt_ccb_alloc(struct dpt_softc *);
static inline void	dpt_ccb_free(struct dpt_softc *, struct dpt_ccb *);

static inline struct dpt_ccb *
dpt_ccb_alloc(struct dpt_softc *sc)
{
	struct dpt_ccb *ccb;
	int s;

	s = splbio();
	ccb = SLIST_FIRST(&sc->sc_ccb_free);
	SLIST_REMOVE_HEAD(&sc->sc_ccb_free, ccb_chain);
	splx(s);

	return (ccb);
}
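
/*
 * Note that dpt_ccb_alloc() doesn't check for an empty free list: scsipi
 * is told adapt_openings = sc_nccbs - 1 in dpt_init(), so a free CCB
 * should always be available when a request arrives, and the internal
 * callers only ever hold one CCB at a time.
 */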

static inline void
dpt_ccb_free(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	int s;

	ccb->ccb_flg = 0;
	ccb->ccb_savesp = NULL;
	s = splbio();
	SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
	splx(s);
}

/*
 * Handle an interrupt from the HBA.
 */
int
dpt_intr(void *cookie)
{
	struct dpt_softc *sc;
	struct dpt_ccb *ccb;
	struct eata_sp *sp;
	int forus;

	sc = cookie;
	sp = sc->sc_stp;
	forus = 0;

	for (;;) {
		/*
		 * HBA might have interrupted while we were dealing with the
		 * last completed command, since we ACK before we deal; keep
		 * polling.
		 */
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
			break;
		forus = 1;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
		    sizeof(struct eata_sp), BUS_DMASYNC_POSTREAD);

		/* Might have looped before HBA can reset HBA_AUX_INTR. */
		if (sp->sp_ccbid == -1) {
			DELAY(50);

			if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) == 0)
				return (0);

			printf("%s: no status\n", device_xname(sc->sc_dev));

			/* Re-sync DMA map */
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    sc->sc_stpoff, sizeof(struct eata_sp),
			    BUS_DMASYNC_POSTREAD);
		}

		/* Make sure CCB ID from status packet is realistic. */
		if ((u_int)sp->sp_ccbid >= sc->sc_nccbs) {
			printf("%s: bogus status (returned CCB id %d)\n",
			    device_xname(sc->sc_dev), sp->sp_ccbid);

			/* Ack the interrupt */
			sp->sp_ccbid = -1;
			(void)dpt_inb(sc, HA_STATUS);
			continue;
		}

		/* Sync up DMA map and cache cmd status. */
		ccb = sc->sc_ccbs + sp->sp_ccbid;

		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
		    sizeof(struct dpt_ccb), BUS_DMASYNC_POSTWRITE);

		ccb->ccb_hba_status = sp->sp_hba_status & 0x7f;
		ccb->ccb_scsi_status = sp->sp_scsi_status;
		if (ccb->ccb_savesp != NULL)
			memcpy(ccb->ccb_savesp, sp, sizeof(*sp));

		/*
		 * Ack the interrupt and process the CCB.  If this
		 * is a private CCB it's up to dpt_ccb_poll() to
		 * notice.
		 */
		sp->sp_ccbid = -1;
		ccb->ccb_flg |= CCB_INTR;
		(void)dpt_inb(sc, HA_STATUS);
		if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
			dpt_ccb_done(sc, ccb);
		else if ((ccb->ccb_flg & CCB_WAIT) != 0)
			wakeup(ccb);
	}

	return (forus);
}

/*
 * Initialize and attach the HBA.  This is the entry point from bus
 * specific probe-and-attach code.
 */
void
dpt_init(struct dpt_softc *sc, const char *intrstr)
{
	struct scsipi_adapter *adapt;
	struct scsipi_channel *chan;
	struct eata_inquiry_data *ei;
	int i, j, rv, rseg, maxchannel, maxtarget, mapsize;
	bus_dma_segment_t seg;
	struct eata_cfg *ec;
	struct dpt_ccb *ccb;
	char model[__arraycount(ei->ei_model) + __arraycount(ei->ei_suffix) + 1];
	char vendor[__arraycount(ei->ei_vendor) + 1];

	ec = &sc->sc_ec;
	snprintf(dpt_sig.dsDescription, sizeof(dpt_sig.dsDescription),
	    "NetBSD %s DPT driver", osrelease);
	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_NONE);

	/*
	 * Allocate the CCB/status packet/scratch DMA map and load.
	 */
	sc->sc_nccbs =
	    uimin(be16toh(*(int16_t *)ec->ec_queuedepth), DPT_MAX_CCBS);
	sc->sc_stpoff = sc->sc_nccbs * sizeof(struct dpt_ccb);
	sc->sc_scroff = sc->sc_stpoff + sizeof(struct eata_sp);
	mapsize = sc->sc_nccbs * sizeof(struct dpt_ccb) +
	    DPT_SCRATCH_SIZE + sizeof(struct eata_sp);
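
	/*
	 * The single DMA area is laid out as: sc_nccbs CCBs, then the
	 * status packet (at sc_stpoff), then the scratch area (at
	 * sc_scroff), so one allocation and one map cover all of the
	 * structures shared with the HBA.
	 */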

	if ((rv = bus_dmamem_alloc(sc->sc_dmat, mapsize,
	    PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to allocate CCBs, rv = %d\n", rv);
		return;
	}

	if ((rv = bus_dmamem_map(sc->sc_dmat, &seg, rseg, mapsize,
	    (void **)&sc->sc_ccbs, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to map CCBs, rv = %d\n",
		    rv);
		return;
	}

	if ((rv = bus_dmamap_create(sc->sc_dmat, mapsize,
	    mapsize, 1, 0, BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to create CCB DMA map, rv = %d\n", rv);
		return;
	}

	if ((rv = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap,
	    sc->sc_ccbs, mapsize, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to load CCB DMA map, rv = %d\n", rv);
		return;
	}

	sc->sc_stp = (struct eata_sp *)((char *)sc->sc_ccbs + sc->sc_stpoff);
	sc->sc_stppa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_stpoff;
	sc->sc_scr = (char *)sc->sc_ccbs + sc->sc_scroff;
	sc->sc_scrpa = sc->sc_dmamap->dm_segs[0].ds_addr + sc->sc_scroff;
	sc->sc_stp->sp_ccbid = -1;

	/*
	 * Create the CCBs.
	 */
	SLIST_INIT(&sc->sc_ccb_free);
	memset(sc->sc_ccbs, 0, sizeof(struct dpt_ccb) * sc->sc_nccbs);

	for (i = 0, ccb = sc->sc_ccbs; i < sc->sc_nccbs; i++, ccb++) {
		rv = bus_dmamap_create(sc->sc_dmat, DPT_MAX_XFER,
		    DPT_SG_SIZE, DPT_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap_xfer);
		if (rv) {
			aprint_error_dev(sc->sc_dev, "can't create ccb dmamap (%d)\n", rv);
			break;
		}

		ccb->ccb_id = i;
		ccb->ccb_ccbpa = sc->sc_dmamap->dm_segs[0].ds_addr +
		    CCB_OFF(sc, ccb);
		SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_chain);
	}

	if (i == 0) {
		aprint_error_dev(sc->sc_dev, "unable to create CCBs\n");
		return;
	} else if (i != sc->sc_nccbs) {
		aprint_error_dev(sc->sc_dev, "%d/%d CCBs created!\n",
		    i, sc->sc_nccbs);
		sc->sc_nccbs = i;
	}

	/* Set shutdownhook before we start any device activity. */
	if (dpt_sdh == NULL)
		dpt_sdh = shutdownhook_establish(dpt_shutdown, NULL);

	/* Get the inquiry data from the HBA. */
	dpt_hba_inquire(sc, &ei);

	/*
	 * dpt0 at pci0 dev 12 function 0: DPT SmartRAID III (PM3224A/9X-R)
	 * dpt0: interrupting at irq 10
	 * dpt0: 64 queued commands, 1 channel(s), adapter on ID(s) 7
	 */
	for (i = 0; i < __arraycount(ei->ei_vendor) && ei->ei_vendor[i] != ' ';
	    i++)
		vendor[i] = ei->ei_vendor[i];
	vendor[i] = '\0';

	for (i = 0; i < __arraycount(ei->ei_model) && ei->ei_model[i] != ' ';
	    i++)
		model[i] = ei->ei_model[i];
	for (j = 0; j < __arraycount(ei->ei_suffix) && ei->ei_suffix[j] != ' ';
	    i++, j++)
		model[i] = ei->ei_suffix[j];
	model[i] = '\0';

	/* Find the marketing name for the board. */
	for (i = 0; dpt_cname[i] != NULL; i += 2)
		if (memcmp(ei->ei_model + 2, dpt_cname[i], 4) == 0)
			break;

	aprint_normal("%s %s (%s)\n", vendor, dpt_cname[i + 1], model);

	if (intrstr != NULL)
		aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);

	maxchannel = (ec->ec_feat3 & EC_F3_MAX_CHANNEL_MASK) >>
	    EC_F3_MAX_CHANNEL_SHIFT;
	maxtarget = (ec->ec_feat3 & EC_F3_MAX_TARGET_MASK) >>
	    EC_F3_MAX_TARGET_SHIFT;

	aprint_normal_dev(sc->sc_dev,
	    "%d queued commands, %d channel(s), adapter on ID(s)",
	    sc->sc_nccbs, maxchannel + 1);

	for (i = 0; i <= maxchannel; i++) {
		sc->sc_hbaid[i] = ec->ec_hba[3 - i];
		aprint_normal(" %d", sc->sc_hbaid[i]);
	}
	aprint_normal("\n");

	/*
	 * Reset the SCSI controller chip(s) and bus.  XXX Do we need to do
	 * this for each bus?
	 */
	if (dpt_cmd(sc, NULL, CP_IMMEDIATE, CPI_BUS_RESET))
		panic("%s: dpt_cmd failed", device_xname(sc->sc_dev));

	/* Fill in the scsipi_adapter. */
	adapt = &sc->sc_adapt;
	memset(adapt, 0, sizeof(*adapt));
	adapt->adapt_dev = sc->sc_dev;
	adapt->adapt_nchannels = maxchannel + 1;
	adapt->adapt_openings = sc->sc_nccbs - 1;
	adapt->adapt_max_periph = sc->sc_nccbs - 1;
	adapt->adapt_request = dpt_scsipi_request;
	adapt->adapt_minphys = dpt_minphys;

	for (i = 0; i <= maxchannel; i++) {
		/* Fill in the scsipi_channel. */
		chan = &sc->sc_chans[i];
		memset(chan, 0, sizeof(*chan));
		chan->chan_adapter = adapt;
		chan->chan_bustype = &scsi_bustype;
		chan->chan_channel = i;
		chan->chan_ntargets = maxtarget + 1;
		chan->chan_nluns = ec->ec_maxlun + 1;
		chan->chan_id = sc->sc_hbaid[i];
		config_found(sc->sc_dev, chan, scsiprint, CFARGS_NONE);
	}
}

/*
 * Read the EATA configuration from the HBA and perform some sanity checks.
 */
int
dpt_readcfg(struct dpt_softc *sc)
{
	struct eata_cfg *ec;
	int i, j, stat;
	u_int16_t *p;

	ec = &sc->sc_ec;

	/* Older firmware may puke if we talk to it too soon after reset. */
	dpt_outb(sc, HA_COMMAND, CP_RESET);
	DELAY(750000);

	for (i = 1000; i; i--) {
		if ((dpt_inb(sc, HA_STATUS) & HA_ST_READY) != 0)
			break;
		DELAY(2000);
	}

	if (i == 0) {
		printf("%s: HBA not ready after reset (hba status:%02x)\n",
		    device_xname(sc->sc_dev), dpt_inb(sc, HA_STATUS));
		return (-1);
	}

	while ((((stat = dpt_inb(sc, HA_STATUS))
	    != (HA_ST_READY|HA_ST_SEEK_COMPLETE))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR))
	    && (stat != (HA_ST_READY|HA_ST_SEEK_COMPLETE|HA_ST_ERROR|HA_ST_DRQ)))
	    || (dpt_wait(sc, HA_ST_BUSY, 0, 2000))) {
		/* RAID drives still spinning up? */
		if (dpt_inb(sc, HA_ERROR) != 'D' ||
		    dpt_inb(sc, HA_ERROR + 1) != 'P' ||
		    dpt_inb(sc, HA_ERROR + 2) != 'T') {
			printf("%s: HBA not ready\n", device_xname(sc->sc_dev));
			return (-1);
		}
	}

	/*
	 * Issue the read-config command and wait for the data to appear.
	 *
	 * Apparently certain firmware revisions won't DMA later on if we
	 * request the config data using PIO, but it makes it a lot easier
	 * as no DMA setup is required.
	 */
	dpt_outb(sc, HA_COMMAND, CP_PIO_GETCFG);
	memset(ec, 0, sizeof(*ec));
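
	/*
	 * The config data is read in two steps: first the fixed header up
	 * to and including ec_cfglen, which says how much more the firmware
	 * wants to hand over, then the remainder (clamped to what our
	 * eata_cfg can hold).  Whatever is left of the 512-byte transfer is
	 * drained and discarded afterwards.
	 */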
	i = (offsetof(struct eata_cfg, ec_cfglen) +
	    sizeof(ec->ec_cfglen)) >> 1;
	p = (u_int16_t *)ec;

	if (dpt_wait(sc, 0xFF, HA_ST_DATA_RDY, 2000)) {
		printf("%s: cfg data didn't appear (hba status:%02x)\n",
		    device_xname(sc->sc_dev), dpt_inb(sc, HA_STATUS));
		return (-1);
	}

	/* Begin reading. */
	while (i--)
		*p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);

	if ((i = ec->ec_cfglen) > (sizeof(struct eata_cfg)
	    - offsetof(struct eata_cfg, ec_cfglen)
	    - sizeof(ec->ec_cfglen)))
		i = sizeof(struct eata_cfg)
		    - offsetof(struct eata_cfg, ec_cfglen)
		    - sizeof(ec->ec_cfglen);

	j = i + offsetof(struct eata_cfg, ec_cfglen) +
	    sizeof(ec->ec_cfglen);
	i >>= 1;

	while (i--)
		*p++ = bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);

	/* Flush until we have read 512 bytes. */
	i = (512 - j + 1) >> 1;
	while (i--)
		(void)bus_space_read_stream_2(sc->sc_iot, sc->sc_ioh, HA_DATA);

	/* Defaults for older firmware... */
	if (p <= (u_short *)&ec->ec_hba[DPT_MAX_CHANNELS - 1])
		ec->ec_hba[DPT_MAX_CHANNELS - 1] = 7;

	if ((dpt_inb(sc, HA_STATUS) & HA_ST_ERROR) != 0) {
		aprint_error_dev(sc->sc_dev, "HBA error\n");
		return (-1);
	}

	if (memcmp(ec->ec_eatasig, "EATA", 4) != 0) {
		aprint_error_dev(sc->sc_dev, "EATA signature mismatch\n");
		return (-1);
	}

	if ((ec->ec_feat0 & EC_F0_HBA_VALID) == 0) {
		aprint_error_dev(sc->sc_dev, "ec_hba field invalid\n");
		return (-1);
	}

	if ((ec->ec_feat0 & EC_F0_DMA_SUPPORTED) == 0) {
		aprint_error_dev(sc->sc_dev, "DMA not supported\n");
		return (-1);
	}

	return (0);
}

/*
 * Our `shutdownhook' to cleanly shut down the HBA.  The HBA must flush all
 * data from its cache and mark array groups as clean.
 *
 * XXX This doesn't always work (i.e., the HBA may still be flushing after
 * we tell root that it's safe to power off).
 */
static void
dpt_shutdown(void *cookie)
{
	extern struct cfdriver dpt_cd;
	struct dpt_softc *sc;
	int i;

	printf("shutting down dpt devices...");

	for (i = 0; i < dpt_cd.cd_ndevs; i++) {
		if ((sc = device_lookup_private(&dpt_cd, i)) == NULL)
			continue;
		dpt_cmd(sc, NULL, CP_IMMEDIATE, CPI_POWEROFF_WARN);
	}

	delay(10000*1000);
	printf(" done\n");
}

/*
 * Send an EATA command to the HBA.
 */
static int
dpt_cmd(struct dpt_softc *sc, struct dpt_ccb *ccb, int eatacmd, int icmd)
{
	u_int32_t pa;
	int i, s;

	s = splbio();

	for (i = 20000; i != 0; i--) {
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_BUSY) == 0)
			break;
		DELAY(50);
	}
	if (i == 0) {
		splx(s);
		return (-1);
	}

	pa = (ccb != NULL ? ccb->ccb_ccbpa : 0);
	dpt_outb(sc, HA_DMA_BASE + 0, (pa      ) & 0xff);
	dpt_outb(sc, HA_DMA_BASE + 1, (pa >>  8) & 0xff);
	dpt_outb(sc, HA_DMA_BASE + 2, (pa >> 16) & 0xff);
	dpt_outb(sc, HA_DMA_BASE + 3, (pa >> 24) & 0xff);

	if (eatacmd == CP_IMMEDIATE)
		dpt_outb(sc, HA_ICMD, icmd);

	dpt_outb(sc, HA_COMMAND, eatacmd);

	splx(s);
	return (0);
}

/*
 * Wait for the HBA status register to reach a specific state.
 */
static int
dpt_wait(struct dpt_softc *sc, u_int8_t mask, u_int8_t state, int ms)
{

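	/* ms * 10 iterations of DELAY(100) give a timeout of ~ms milliseconds. */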
	for (ms *= 10; ms != 0; ms--) {
		if ((dpt_inb(sc, HA_STATUS) & mask) == state)
			return (0);
		DELAY(100);
	}

	return (-1);
}

/*
 * Spin waiting for a command to finish.  The timeout value from the CCB is
 * used.  The CCB must be marked with CCB_PRIVATE, otherwise it will get
 * recycled before we get a look at it.
 */
static int
dpt_ccb_poll(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	int i, s;

#ifdef DEBUG
	if ((ccb->ccb_flg & CCB_PRIVATE) == 0)
		panic("dpt_ccb_poll: called for non-CCB_PRIVATE request");
#endif

	s = splbio();

	if ((ccb->ccb_flg & CCB_INTR) != 0) {
		splx(s);
		return (0);
	}

	for (i = ccb->ccb_timeout * 20; i != 0; i--) {
		if ((dpt_inb(sc, HA_AUX_STATUS) & HA_AUX_INTR) != 0)
			dpt_intr(sc);
		if ((ccb->ccb_flg & CCB_INTR) != 0)
			break;
		DELAY(50);
	}

	splx(s);
	return (i == 0);
}

/*
 * We have a command which has been processed by the HBA, so now we look to
 * see how the operation went.  CCBs marked CCB_PRIVATE are not passed here
 * by dpt_intr().
 */
static void
dpt_ccb_done(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	struct scsipi_xfer *xs;

	xs = ccb->ccb_xs;

	SC_DEBUG(xs->xs_periph, SCSIPI_DB2, ("dpt_ccb_done\n"));

	/*
	 * If we were a data transfer, unload the map that described the
	 * data buffer.
	 */
	if (xs->datalen != 0)
		dpt_ccb_unmap(sc, ccb);

	if (xs->error == XS_NOERROR) {
		if (ccb->ccb_hba_status != SP_HBA_NO_ERROR) {
			switch (ccb->ccb_hba_status) {
			case SP_HBA_ERROR_SEL_TO:
				xs->error = XS_SELTIMEOUT;
				break;
			case SP_HBA_ERROR_RESET:
				xs->error = XS_RESET;
				break;
			default:
				printf("%s: HBA status %x\n",
				    device_xname(sc->sc_dev),
				    ccb->ccb_hba_status);
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else if (ccb->ccb_scsi_status != SCSI_OK) {
			switch (ccb->ccb_scsi_status) {
			case SCSI_CHECK:
				memcpy(&xs->sense.scsi_sense, &ccb->ccb_sense,
				    sizeof(xs->sense.scsi_sense));
				xs->error = XS_SENSE;
				break;
			case SCSI_BUSY:
			case SCSI_QUEUE_FULL:
				xs->error = XS_BUSY;
				break;
			default:
				scsipi_printaddr(xs->xs_periph);
				printf("SCSI status %x\n",
				    ccb->ccb_scsi_status);
				xs->error = XS_DRIVER_STUFFUP;
				break;
			}
		} else
			xs->resid = 0;

		xs->status = ccb->ccb_scsi_status;
	}

	/* Free up the CCB and mark the command as done. */
	dpt_ccb_free(sc, ccb);
	scsipi_done(xs);
}

/*
 * Specified CCB has timed out, abort it.
 */
static void
dpt_ccb_abort(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	struct scsipi_periph *periph;
	struct scsipi_xfer *xs;
	int s;

	xs = ccb->ccb_xs;
	periph = xs->xs_periph;

	scsipi_printaddr(periph);
	printf("timed out (status:%02x aux status:%02x)",
	    dpt_inb(sc, HA_STATUS), dpt_inb(sc, HA_AUX_STATUS));

	s = splbio();

	if ((ccb->ccb_flg & CCB_ABORT) != 0) {
		/* Abort timed out, reset the HBA */
		printf(" AGAIN, resetting HBA\n");
		dpt_outb(sc, HA_COMMAND, CP_RESET);
		DELAY(750000);
	} else {
		/* Abort the operation that has timed out */
		printf("\n");
		xs->error = XS_TIMEOUT;
		ccb->ccb_timeout = DPT_ABORT_TIMEOUT;
		ccb->ccb_flg |= CCB_ABORT;
		/* Start the abort */
		if (dpt_cmd(sc, ccb, CP_IMMEDIATE, CPI_SPEC_ABORT))
			aprint_error_dev(sc->sc_dev, "dpt_cmd failed\n");
	}

	splx(s);
}

/*
 * Map a data transfer.
 */
static int
dpt_ccb_map(struct dpt_softc *sc, struct dpt_ccb *ccb)
{
	struct scsipi_xfer *xs;
	bus_dmamap_t xfer;
	bus_dma_segment_t *ds;
	struct eata_sg *sg;
	struct eata_cp *cp;
	int rv, i;

	xs = ccb->ccb_xs;
	xfer = ccb->ccb_dmamap_xfer;
	cp = &ccb->ccb_eata_cp;

	rv = bus_dmamap_load(sc->sc_dmat, xfer, xs->data, xs->datalen, NULL,
	    ((xs->xs_control & XS_CTL_NOSLEEP) != 0 ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK) | BUS_DMA_STREAMING |
	    ((xs->xs_control & XS_CTL_DATA_IN) ? BUS_DMA_READ : BUS_DMA_WRITE));

	switch (rv) {
	case 0:
		break;
	case ENOMEM:
	case EAGAIN:
		xs->error = XS_RESOURCE_SHORTAGE;
		break;
	default:
		xs->error = XS_DRIVER_STUFFUP;
		printf("%s: error %d loading map\n", device_xname(sc->sc_dev), rv);
		break;
	}

	if (xs->error != XS_NOERROR) {
		dpt_ccb_free(sc, ccb);
		scsipi_done(xs);
		return (-1);
	}

	bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
	    (xs->xs_control & XS_CTL_DATA_IN) != 0 ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	/* Don't bother using scatter/gather for just 1 seg */
	if (xfer->dm_nsegs == 1) {
		cp->cp_dataaddr = htobe32(xfer->dm_segs[0].ds_addr);
		cp->cp_datalen = htobe32(xfer->dm_segs[0].ds_len);
	} else {
		/*
		 * Load the hardware scatter/gather map with
		 * the contents of the DMA map.
		 */
		sg = ccb->ccb_sg;
		ds = xfer->dm_segs;
		for (i = 0; i < xfer->dm_nsegs; i++, sg++, ds++) {
			sg->sg_addr = htobe32(ds->ds_addr);
			sg->sg_len = htobe32(ds->ds_len);
		}
		cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
		    sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct dpt_ccb, ccb_sg));
		cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
		cp->cp_ctl0 |= CP_C0_SCATTER;
	}

	return (0);
}

/*
 * Unmap a transfer.
 */
static void
dpt_ccb_unmap(struct dpt_softc *sc, struct dpt_ccb *ccb)
{

	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
	    ccb->ccb_dmamap_xfer->dm_mapsize,
	    (ccb->ccb_eata_cp.cp_ctl0 & CP_C0_DATA_IN) != 0 ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);
}

/*
 * Adjust the size of each I/O before it passes to the SCSI layer.
 */
static void
dpt_minphys(struct buf *bp)
{

	if (bp->b_bcount > DPT_MAX_XFER)
		bp->b_bcount = DPT_MAX_XFER;
	minphys(bp);
}

/*
 * Start a SCSI command.
 */
static void
dpt_scsipi_request(struct scsipi_channel *chan, scsipi_adapter_req_t req,
    void *arg)
{
	struct dpt_softc *sc;
	struct scsipi_xfer *xs;
	int flags;
	struct scsipi_periph *periph;
	struct dpt_ccb *ccb;
	struct eata_cp *cp;

	sc = device_private(chan->chan_adapter->adapt_dev);

	switch (req) {
	case ADAPTER_REQ_RUN_XFER:
		xs = arg;
		periph = xs->xs_periph;
		flags = xs->xs_control;

#ifdef DIAGNOSTIC
		/* Cmds must be no more than 12 bytes for us. */
		if (xs->cmdlen > 12) {
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			break;
		}
#endif
		/*
		 * XXX We can't reset devices just yet.  Apparently some
		 * older firmware revisions don't even support it.
		 */
		if ((flags & XS_CTL_RESET) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			scsipi_done(xs);
			break;
		}

		/*
		 * Get a CCB and fill it.
		 */
		ccb = dpt_ccb_alloc(sc);
		ccb->ccb_xs = xs;
		ccb->ccb_timeout = xs->timeout;

		cp = &ccb->ccb_eata_cp;
		memcpy(&cp->cp_cdb_cmd, xs->cmd, xs->cmdlen);
		cp->cp_ccbid = ccb->ccb_id;
		cp->cp_senselen = sizeof(ccb->ccb_sense);
		cp->cp_stataddr = htobe32(sc->sc_stppa);
		cp->cp_ctl0 = CP_C0_AUTO_SENSE;
		cp->cp_ctl1 = 0;
		cp->cp_ctl2 = 0;
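		/*
		 * Route the command: the target ID and channel are packed
		 * into ctl3, the LUN and identify/priority bits into ctl4.
		 */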
		cp->cp_ctl3 = periph->periph_target << CP_C3_ID_SHIFT;
		cp->cp_ctl3 |= chan->chan_channel << CP_C3_CHANNEL_SHIFT;
		cp->cp_ctl4 = periph->periph_lun << CP_C4_LUN_SHIFT;
		cp->cp_ctl4 |= CP_C4_DIS_PRI | CP_C4_IDENTIFY;

		if ((flags & XS_CTL_DATA_IN) != 0)
			cp->cp_ctl0 |= CP_C0_DATA_IN;
		if ((flags & XS_CTL_DATA_OUT) != 0)
			cp->cp_ctl0 |= CP_C0_DATA_OUT;
		if (sc->sc_hbaid[chan->chan_channel] == periph->periph_target)
			cp->cp_ctl0 |= CP_C0_INTERPRET;

		/* Synchronous xfers mustn't write-back through the cache. */
		if (xs->bp != NULL)
			if ((xs->bp->b_flags & (B_ASYNC | B_READ)) == 0)
				cp->cp_ctl2 |= CP_C2_NO_CACHE;

		cp->cp_senseaddr =
		    htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
		    CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));

		if (xs->datalen != 0) {
			if (dpt_ccb_map(sc, ccb))
				break;
		} else {
			cp->cp_dataaddr = 0;
			cp->cp_datalen = 0;
		}

		/* Sync up CCB and status packet. */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    CCB_OFF(sc, ccb), sizeof(struct dpt_ccb),
		    BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
		    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);

		/*
		 * Start the command.
		 */
		if ((xs->xs_control & XS_CTL_POLL) != 0)
			ccb->ccb_flg |= CCB_PRIVATE;

		if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0)) {
			aprint_error_dev(sc->sc_dev, "dpt_cmd failed\n");
			xs->error = XS_DRIVER_STUFFUP;
			if (xs->datalen != 0)
				dpt_ccb_unmap(sc, ccb);
			dpt_ccb_free(sc, ccb);
			break;
		}

		if ((xs->xs_control & XS_CTL_POLL) == 0)
			break;

		if (dpt_ccb_poll(sc, ccb)) {
			dpt_ccb_abort(sc, ccb);
			/* Wait for abort to complete... */
			if (dpt_ccb_poll(sc, ccb))
				dpt_ccb_abort(sc, ccb);
		}

		dpt_ccb_done(sc, ccb);
		break;

	case ADAPTER_REQ_GROW_RESOURCES:
		/*
		 * Not supported, since we allocate the maximum number of
		 * CCBs up front.
		 */
		break;

	case ADAPTER_REQ_SET_XFER_MODE:
		/*
		 * This will be handled by the HBA itself, and we can't
		 * modify that (ditto for tagged queueing).
		 */
		break;
	}
}

/*
 * Get inquiry data from the adapter.
 */
static void
dpt_hba_inquire(struct dpt_softc *sc, struct eata_inquiry_data **ei)
{
	struct dpt_ccb *ccb;
	struct eata_cp *cp;

	*ei = (struct eata_inquiry_data *)sc->sc_scr;

	/* Get a CCB and mark as private */
	ccb = dpt_ccb_alloc(sc);
	ccb->ccb_flg |= CCB_PRIVATE;
	ccb->ccb_timeout = 200;

	/* Put all the arguments into the CCB. */
	cp = &ccb->ccb_eata_cp;
	cp->cp_ccbid = ccb->ccb_id;
	cp->cp_senselen = sizeof(ccb->ccb_sense);
	cp->cp_senseaddr = 0;
	cp->cp_stataddr = htobe32(sc->sc_stppa);
	cp->cp_dataaddr = htobe32(sc->sc_scrpa);
	cp->cp_datalen = htobe32(sizeof(struct eata_inquiry_data));
	cp->cp_ctl0 = CP_C0_DATA_IN | CP_C0_INTERPRET;
	cp->cp_ctl1 = 0;
	cp->cp_ctl2 = 0;
	cp->cp_ctl3 = sc->sc_hbaid[0] << CP_C3_ID_SHIFT;
	cp->cp_ctl4 = CP_C4_DIS_PRI | CP_C4_IDENTIFY;

	/* Put together the SCSI inquiry command. */
	memset(&cp->cp_cdb_cmd, 0, 12);
	cp->cp_cdb_cmd = INQUIRY;
	cp->cp_cdb_len = sizeof(struct eata_inquiry_data);

	/* Sync up CCB, status packet and scratch area. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_PREREAD);

	/* Start the command and poll on completion. */
	if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0))
		panic("%s: dpt_cmd failed", device_xname(sc->sc_dev));

	if (dpt_ccb_poll(sc, ccb))
		panic("%s: inquiry timed out", device_xname(sc->sc_dev));

	if (ccb->ccb_hba_status != SP_HBA_NO_ERROR ||
	    ccb->ccb_scsi_status != SCSI_OK)
		panic("%s: inquiry failed (hba:%02x scsi:%02x)",
		    device_xname(sc->sc_dev), ccb->ccb_hba_status,
		    ccb->ccb_scsi_status);

	/* Sync up the DMA map and free CCB, returning. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_scroff,
	    sizeof(struct eata_inquiry_data), BUS_DMASYNC_POSTREAD);
	dpt_ccb_free(sc, ccb);
}

int
dptopen(dev_t dev, int flag, int mode, struct lwp *l)
{

	if (device_lookup(&dpt_cd, minor(dev)) == NULL)
		return (ENXIO);

	return (0);
}

int
dptioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
{
	struct dpt_softc *sc;
	int rv;

	sc = device_lookup_private(&dpt_cd, minor(dev));

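	/*
	 * Match on the low 16 bits (group and command number) only,
	 * presumably so the commands are recognized regardless of how the
	 * caller's ioctl() macro encoded the IOC direction and length bits.
	 */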
	switch (cmd & 0xffff) {
	case DPT_SIGNATURE:
		memcpy(data, &dpt_sig, uimin(IOCPARM_LEN(cmd), sizeof(dpt_sig)));
		break;

	case DPT_CTRLINFO:
		dpt_ctlrinfo(sc, (struct dpt_eata_ctlrinfo *)data);
		break;

	case DPT_SYSINFO:
		dpt_sysinfo(sc, (struct dpt_sysinfo *)data);
		break;

	case DPT_BLINKLED:
		/*
		 * XXX Don't know how to get this from EATA boards.  I think
		 * it involves waiting for a "DPT" sequence from HA_ERROR
		 * and then reading one of the HA_ICMD registers.
		 */
		*(int *)data = 0;
		break;

	case DPT_EATAUSRCMD:
		rv = kauth_authorize_device_passthru(l->l_cred, dev,
		    KAUTH_REQ_DEVICE_RAWIO_PASSTHRU_ALL, data);
		if (rv)
			return (rv);

		if (IOCPARM_LEN(cmd) < sizeof(struct eata_ucp)) {
			DPRINTF(("%s: ucp %lu vs %lu bytes\n",
			    device_xname(sc->sc_dev), IOCPARM_LEN(cmd),
			    (unsigned long int)sizeof(struct eata_ucp)));
			return (EINVAL);
		}

		mutex_enter(&sc->sc_lock);
		rv = dpt_passthrough(sc, (struct eata_ucp *)data, l);
		mutex_exit(&sc->sc_lock);

		return (rv);

	default:
		DPRINTF(("%s: unknown ioctl %lx\n", device_xname(sc->sc_dev), cmd));
		return (ENOTTY);
	}

	return (0);
}

void
dpt_ctlrinfo(struct dpt_softc *sc, struct dpt_eata_ctlrinfo *info)
{

	memset(info, 0, sizeof(*info));
	info->id = sc->sc_hbaid[0];
	info->vect = sc->sc_isairq;
	info->base = sc->sc_isaport;
	info->qdepth = sc->sc_nccbs;
	info->sgsize = DPT_SG_SIZE * sizeof(struct eata_sg);
	info->heads = 16;
	info->sectors = 63;
	info->do_drive32 = 1;
	info->primary = 1;
	info->cpLength = sizeof(struct eata_cp);
	info->spLength = sizeof(struct eata_sp);
	info->drqNum = sc->sc_isadrq;
}

void
dpt_sysinfo(struct dpt_softc *sc, struct dpt_sysinfo *info)
{
#ifdef i386
	int i, j;
#endif

	memset(info, 0, sizeof(*info));

#ifdef i386
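	/*
	 * Read the drive type bytes from the ISA RTC/CMOS (index port 0x70,
	 * data port 0x71).  Register 0x12 holds the two 4-bit drive types;
	 * a type of 0xf conventionally means "see the extended type byte"
	 * at 0x19/0x1a.
	 */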
	outb (0x70, 0x12);
	i = inb(0x71);
	j = i >> 4;
	if (i == 0x0f) {
		outb (0x70, 0x19);
		j = inb (0x71);
	}
	info->drive0CMOS = j;

	j = i & 0x0f;
	if (i == 0x0f) {
		outb (0x70, 0x1a);
		j = inb (0x71);
	}
	info->drive1CMOS = j;
	info->processorFamily = dpt_sig.dsProcessorFamily;

	/*
	 * Get the conventional memory size from CMOS.
	 */
	outb(0x70, 0x16);
	j = inb(0x71);
	j <<= 8;
	outb(0x70, 0x15);
	j |= inb(0x71);
	info->conventionalMemSize = j;

	/*
	 * Get the extended memory size from CMOS.
	 */
	outb(0x70, 0x31);
	j = inb(0x71);
	j <<= 8;
	outb(0x70, 0x30);
	j |= inb(0x71);
	info->extendedMemSize = j;

	switch (cpu_class) {
	case CPUCLASS_386:
		info->processorType = PROC_386;
		break;
	case CPUCLASS_486:
		info->processorType = PROC_486;
		break;
	case CPUCLASS_586:
		info->processorType = PROC_PENTIUM;
		break;
	case CPUCLASS_686:
	default:
		info->processorType = PROC_SEXIUM;
		break;
	}

	info->flags = SI_CMOS_Valid | SI_BusTypeValid |
	    SI_MemorySizeValid | SI_NO_SmartROM;
#else
	info->flags = SI_BusTypeValid | SI_NO_SmartROM;
#endif

	info->busType = sc->sc_bustype;
}

int
dpt_passthrough(struct dpt_softc *sc, struct eata_ucp *ucp, struct lwp *l)
{
	struct dpt_ccb *ccb;
	struct eata_sp sp;
	struct eata_cp *cp;
	struct eata_sg *sg;
	bus_dmamap_t xfer = 0;		/* XXX: gcc */
	bus_dma_segment_t *ds;
	int datain = 0, s, rv = 0, i, uslen;	/* XXX: gcc */

	/*
	 * Get a CCB and fill.
	 */
	ccb = dpt_ccb_alloc(sc);
	ccb->ccb_flg |= CCB_PRIVATE | CCB_WAIT;
	ccb->ccb_timeout = 0;
	ccb->ccb_savesp = &sp;
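
	/*
	 * CCB_PRIVATE keeps dpt_intr() from completing this CCB through
	 * dpt_ccb_done(); instead, with CCB_WAIT set, the interrupt handler
	 * copies the status packet into sp via ccb_savesp and wakes us up
	 * from the tsleep() below.
	 */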

	cp = &ccb->ccb_eata_cp;
	memcpy(cp, ucp->ucp_cp, sizeof(ucp->ucp_cp));
	uslen = cp->cp_senselen;
	cp->cp_ccbid = ccb->ccb_id;
	cp->cp_senselen = sizeof(ccb->ccb_sense);
	cp->cp_senseaddr = htobe32(sc->sc_dmamap->dm_segs[0].ds_addr +
	    CCB_OFF(sc, ccb) + offsetof(struct dpt_ccb, ccb_sense));
	cp->cp_stataddr = htobe32(sc->sc_stppa);

	/*
	 * Map data transfers.
	 */
	if (ucp->ucp_dataaddr && ucp->ucp_datalen) {
		xfer = ccb->ccb_dmamap_xfer;
		datain = ((cp->cp_ctl0 & CP_C0_DATA_IN) != 0);

		if (ucp->ucp_datalen > DPT_MAX_XFER) {
			DPRINTF(("%s: xfer too big\n", device_xname(sc->sc_dev)));
			dpt_ccb_free(sc, ccb);
			return (EFBIG);
		}
		rv = bus_dmamap_load(sc->sc_dmat, xfer,
		    ucp->ucp_dataaddr, ucp->ucp_datalen, l->l_proc,
		    BUS_DMA_WAITOK | BUS_DMA_STREAMING |
		    (datain ? BUS_DMA_READ : BUS_DMA_WRITE));
		if (rv != 0) {
			DPRINTF(("%s: map failed; %d\n", device_xname(sc->sc_dev),
			    rv));
			dpt_ccb_free(sc, ccb);
			return (rv);
		}

		bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
		    (datain ? BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));

		sg = ccb->ccb_sg;
		ds = xfer->dm_segs;
		for (i = 0; i < xfer->dm_nsegs; i++, sg++, ds++) {
			sg->sg_addr = htobe32(ds->ds_addr);
			sg->sg_len = htobe32(ds->ds_len);
		}
		cp->cp_dataaddr = htobe32(CCB_OFF(sc, ccb) +
		    sc->sc_dmamap->dm_segs[0].ds_addr +
		    offsetof(struct dpt_ccb, ccb_sg));
		cp->cp_datalen = htobe32(i * sizeof(struct eata_sg));
		cp->cp_ctl0 |= CP_C0_SCATTER;
	} else {
		cp->cp_dataaddr = 0;
		cp->cp_datalen = 0;
	}

	/*
	 * Start the command and sleep on completion.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_PREWRITE);
	s = splbio();
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, sc->sc_stpoff,
	    sizeof(struct eata_sp), BUS_DMASYNC_PREREAD);
	if (dpt_cmd(sc, ccb, CP_DMA_CMD, 0))
		panic("%s: dpt_cmd failed", device_xname(sc->sc_dev));
	tsleep(ccb, PWAIT, "dptucmd", 0);
	splx(s);

	/*
	 * Sync up the DMA map and copy out results.
	 */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, CCB_OFF(sc, ccb),
	    sizeof(struct dpt_ccb), BUS_DMASYNC_POSTWRITE);

	if (cp->cp_datalen != 0) {
		bus_dmamap_sync(sc->sc_dmat, xfer, 0, xfer->dm_mapsize,
		    (datain ? BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE));
		bus_dmamap_unload(sc->sc_dmat, xfer);
	}

	if (ucp->ucp_stataddr != NULL) {
		rv = copyout(&sp, ucp->ucp_stataddr, sizeof(sp));
		if (rv != 0) {
			DPRINTF(("%s: sp copyout() failed\n",
			    device_xname(sc->sc_dev)));
		}
	}
	if (rv == 0 && ucp->ucp_senseaddr != NULL) {
		i = uimin(uslen, sizeof(ccb->ccb_sense));
		rv = copyout(&ccb->ccb_sense, ucp->ucp_senseaddr, i);
		if (rv != 0) {
			DPRINTF(("%s: sense copyout() failed\n",
			    device_xname(sc->sc_dev)));
		}
	}

	ucp->ucp_hstatus = (u_int8_t)ccb->ccb_hba_status;
	ucp->ucp_tstatus = (u_int8_t)ccb->ccb_scsi_status;
	dpt_ccb_free(sc, ccb);
	return (rv);
}