1 /* $OpenBSD: ips.c,v 1.136 2024/05/24 06:02:57 jsg Exp $ */
2
3 /*
4 * Copyright (c) 2006, 2007, 2009 Alexander Yurchenko <grange@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19 /*
20 * IBM (Adaptec) ServeRAID controllers driver.
21 */
22
23 #include "bio.h"
24
25 #include <sys/param.h>
26 #include <sys/systm.h>
27 #include <sys/device.h>
28 #include <sys/ioctl.h>
29 #include <sys/malloc.h>
30 #include <sys/sensors.h>
31 #include <sys/timeout.h>
32 #include <sys/queue.h>
33
34 #include <machine/bus.h>
35
36 #include <scsi/scsi_all.h>
37 #include <scsi/scsi_disk.h>
38 #include <scsi/scsiconf.h>
39
40 #include <dev/biovar.h>
41
42 #include <dev/pci/pcidevs.h>
43 #include <dev/pci/pcireg.h>
44 #include <dev/pci/pcivar.h>
45
46 /* Debug levels */
47 #define IPS_D_ERR 0x0001 /* errors */
48 #define IPS_D_INFO 0x0002 /* information */
49 #define IPS_D_XFER 0x0004 /* transfers */
50
51 #ifdef IPS_DEBUG
52 #define DPRINTF(a, b) do { if (ips_debug & (a)) printf b; } while (0)
53 int ips_debug = IPS_D_ERR;
54 #else
55 #define DPRINTF(a, b)
56 #endif
57
58 #define IPS_MAXDRIVES 8
59 #define IPS_MAXCHANS 4
60 #define IPS_MAXTARGETS 16
61 #define IPS_MAXCHUNKS 16
62 #define IPS_MAXCMDS 128
63
64 #define IPS_MAXSGS 16
65 #define IPS_MAXCDB 12
66
67 #define IPS_SECSZ 512
68 #define IPS_NVRAMPGSZ 128
69 #define IPS_SQSZ (IPS_MAXCMDS * sizeof(u_int32_t))
70
71 #define IPS_TIMEOUT 60000 /* ms */
72
73 /* Command codes */
74 #define IPS_CMD_READ 0x02
75 #define IPS_CMD_WRITE 0x03
76 #define IPS_CMD_DCDB 0x04
77 #define IPS_CMD_GETADAPTERINFO 0x05
78 #define IPS_CMD_FLUSH 0x0a
79 #define IPS_CMD_REBUILDSTATUS 0x0c
80 #define IPS_CMD_SETSTATE 0x10
81 #define IPS_CMD_REBUILD 0x16
82 #define IPS_CMD_ERRORTABLE 0x17
83 #define IPS_CMD_GETDRIVEINFO 0x19
84 #define IPS_CMD_RESETCHAN 0x1a
85 #define IPS_CMD_DOWNLOAD 0x20
86 #define IPS_CMD_RWBIOSFW 0x22
87 #define IPS_CMD_READCONF 0x38
88 #define IPS_CMD_GETSUBSYS 0x40
89 #define IPS_CMD_CONFIGSYNC 0x58
90 #define IPS_CMD_READ_SG 0x82
91 #define IPS_CMD_WRITE_SG 0x83
92 #define IPS_CMD_DCDB_SG 0x84
93 #define IPS_CMD_EDCDB 0x95
94 #define IPS_CMD_EDCDB_SG 0x96
95 #define IPS_CMD_RWNVRAMPAGE 0xbc
96 #define IPS_CMD_GETVERINFO 0xc6
97 #define IPS_CMD_FFDC 0xd7
98 #define IPS_CMD_SG 0x80
99 #define IPS_CMD_RWNVRAM 0xbc
100
101 /* DCDB attributes */
102 #define IPS_DCDB_DATAIN 0x01 /* data input */
103 #define IPS_DCDB_DATAOUT 0x02 /* data output */
104 #define IPS_DCDB_XFER64K 0x08 /* 64K transfer */
105 #define IPS_DCDB_TIMO10 0x10 /* 10 secs timeout */
106 #define IPS_DCDB_TIMO60 0x20 /* 60 secs timeout */
107 #define IPS_DCDB_TIMO20M 0x30 /* 20 mins timeout */
108 #define IPS_DCDB_NOAUTOREQSEN 0x40 /* no auto request sense */
109 #define IPS_DCDB_DISCON 0x80 /* disconnect allowed */
110
111 /* Register definitions */
112 #define IPS_REG_HIS 0x08 /* host interrupt status */
113 #define IPS_REG_HIS_SCE 0x01 /* status channel enqueue */
114 #define IPS_REG_HIS_EN 0x80 /* enable interrupts */
115 #define IPS_REG_CCSA 0x10 /* command channel system address */
116 #define IPS_REG_CCC 0x14 /* command channel control */
117 #define IPS_REG_CCC_SEM 0x0008 /* semaphore */
118 #define IPS_REG_CCC_START 0x101a /* start command */
119 #define IPS_REG_SQH 0x20 /* status queue head */
120 #define IPS_REG_SQT 0x24 /* status queue tail */
121 #define IPS_REG_SQE 0x28 /* status queue end */
122 #define IPS_REG_SQS 0x2c /* status queue start */
123
124 #define IPS_REG_OIS 0x30 /* outbound interrupt status */
125 #define IPS_REG_OIS_PEND 0x0008 /* interrupt is pending */
126 #define IPS_REG_OIM 0x34 /* outbound interrupt mask */
127 #define IPS_REG_OIM_DS 0x0008 /* disable interrupts */
128 #define IPS_REG_IQP 0x40 /* inbound queue port */
129 #define IPS_REG_OQP 0x44 /* outbound queue port */
130
131 /* Status word fields */
132 #define IPS_STAT_ID(x) (((x) >> 8) & 0xff) /* command id */
133 #define IPS_STAT_BASIC(x) (((x) >> 16) & 0xff) /* basic status */
134 #define IPS_STAT_EXT(x) (((x) >> 24) & 0xff) /* ext status */
135 #define IPS_STAT_GSC(x) ((x) & 0x0f)
136
137 /* Basic status codes */
138 #define IPS_STAT_OK 0x00 /* success */
139 #define IPS_STAT_RECOV 0x01 /* recovered error */
140 #define IPS_STAT_INVOP 0x03 /* invalid opcode */
141 #define IPS_STAT_INVCMD 0x04 /* invalid command block */
142 #define IPS_STAT_INVPARM 0x05 /* invalid parameters block */
143 #define IPS_STAT_BUSY 0x08 /* busy */
144 #define IPS_STAT_CMPLERR 0x0c /* completed with error */
145 #define IPS_STAT_LDERR 0x0d /* logical drive error */
146 #define IPS_STAT_TIMO 0x0e /* timeout */
147 #define IPS_STAT_PDRVERR 0x0f /* physical drive error */
148
149 /* Extended status codes */
150 #define IPS_ESTAT_SELTIMO 0xf0 /* select timeout */
151 #define IPS_ESTAT_OURUN 0xf2 /* over/underrun */
152 #define IPS_ESTAT_HOSTRST 0xf7 /* host reset */
153 #define IPS_ESTAT_DEVRST 0xf8 /* device reset */
154 #define IPS_ESTAT_RECOV 0xfc /* recovered error */
155 #define IPS_ESTAT_CKCOND 0xff /* check condition */
156
157 #define IPS_IOSIZE 128 /* max space size to map */
158
/*
 * Command frame passed to the controller.  All multi-byte fields are
 * little-endian (callers use htole32()/htole16() when filling them in).
 */
struct ips_cmd {
	u_int8_t	code;		/* opcode, IPS_CMD_*; IPS_CMD_SG is or'ed in when sgcnt > 0 */
	u_int8_t	id;		/* command id; presumably echoed back via IPS_STAT_ID() — TODO confirm */
	u_int8_t	drive;		/* logical drive number */
	u_int8_t	sgcnt;		/* scatter-gather element count, 0 for direct transfer */
	u_int32_t	lba;		/* starting sector */
	u_int32_t	sgaddr;		/* physical address of data buffer or S/G list */
	u_int16_t	seccnt;		/* sector count */
	u_int8_t	seg4g;
	u_int8_t	esg;
	u_int32_t	ccsar;
	u_int32_t	cccr;
};
173
/*
 * Direct CDB (SCSI pass-through) frame: wraps a raw CDB for a physical
 * device on one of the controller's channels (see ips_scsi_pt_cmd()).
 */
struct ips_dcdb {
	u_int8_t	device;		/* channel in high nibble, target in low */
	u_int8_t	attr;		/* IPS_DCDB_* attribute flags */
	u_int16_t	datalen;	/* transfer length, little-endian */
	u_int32_t	sgaddr;		/* data buffer / S/G list phys addr */
	u_int8_t	cdblen;		/* length of cdb[], <= IPS_MAXCDB */
	u_int8_t	senselen;	/* bytes of sense[] the controller may fill */
	u_int8_t	sgcnt;		/* scatter-gather element count */
	u_int8_t	__reserved1;
	u_int8_t	cdb[IPS_MAXCDB];	/* the SCSI CDB itself */
	u_int8_t	sense[64];	/* auto request sense data */
	u_int8_t	status;		/* SCSI status byte */
	u_int8_t	__reserved2[3];
};
189
/* Scatter-gather array element (physical address/length pair) */
struct ips_sg {
	u_int32_t	addr;		/* segment physical address */
	u_int32_t	size;		/* segment length in bytes */
};
195
/*
 * Command block: the per-CCB DMA-able unit.  The command frame must stay
 * first — ips_scsi_cmd() casts c_cmdbva directly to struct ips_cmd, and
 * ips_scsi_pt_cmd() points cmd->sgaddr at the dcdb member via offsetof.
 */
struct ips_cmdb {
	struct ips_cmd	cmd;		/* command frame (must be first) */
	struct ips_dcdb	dcdb;		/* pass-through frame, if used */
	struct ips_sg	sg[IPS_MAXSGS];	/* scatter-gather list */
};
202
/* Data frames */

/*
 * Adapter information frame (filled by ips_getadapterinfo()).  cmdcnt is
 * used in ips_attach() to size the CCB queue.
 */
struct ips_adapterinfo {
	u_int8_t	drivecnt;
	u_int8_t	miscflag;
	u_int8_t	sltflag;
	u_int8_t	bstflag;
	u_int8_t	pwrchgcnt;
	u_int8_t	wrongaddrcnt;
	u_int8_t	unidentcnt;
	u_int8_t	nvramdevchgcnt;
	u_int8_t	firmware[8];	/* firmware version, printed at attach */
	u_int8_t	bios[8];	/* BIOS version, printed at attach */
	u_int32_t	drivesize[IPS_MAXDRIVES];
	u_int8_t	cmdcnt;		/* max concurrent commands supported */
	u_int8_t	maxphysdevs;
	u_int16_t	flashrepgmcnt;
	u_int8_t	defunctdiskcnt;
	u_int8_t	rebuildflag;
	u_int8_t	offdrivecnt;
	u_int8_t	critdrivecnt;
	u_int16_t	confupdcnt;
	u_int8_t	blkflag;
	u_int8_t	__reserved;
	u_int16_t	deaddisk[IPS_MAXCHANS][IPS_MAXTARGETS];
};
228
/*
 * Logical drive information frame (filled by ips_getdriveinfo()).
 * drivecnt becomes sc_nunits, the width of the logical SCSI bus.
 */
struct ips_driveinfo {
	u_int8_t	drivecnt;	/* number of logical drives */
	u_int8_t	__reserved[3];
	struct ips_drive {
		u_int8_t	id;
		u_int8_t	__reserved;
		u_int8_t	raid;	/* RAID level, reported via INQUIRY/bio(4) */
		u_int8_t	state;	/* IPS_DS_* drive state */
#define IPS_DS_FREE	0x00
#define IPS_DS_OFFLINE	0x02
#define IPS_DS_ONLINE	0x03
#define IPS_DS_DEGRADED	0x04
#define IPS_DS_SYS	0x06
#define IPS_DS_CRS	0x24

		u_int32_t	seccnt;	/* capacity in sectors, little-endian */
	} drive[IPS_MAXDRIVES];
};
247
/*
 * Controller configuration frame (filled by ips_getconf()): logical
 * drive layout plus per-channel/per-target physical device state.
 */
struct ips_conf {
	u_int8_t	ldcnt;		/* logical drive count */
	u_int8_t	day;
	u_int8_t	month;
	u_int8_t	year;
	u_int8_t	initid[4];
	u_int8_t	hostid[12];
	u_int8_t	time[8];
	u_int32_t	useropt;
	u_int16_t	userfield;
	u_int8_t	rebuildrate;
	u_int8_t	__reserved1;

	struct ips_hw {
		u_int8_t	board[8];
		u_int8_t	cpu[8];
		u_int8_t	nchantype;
		u_int8_t	nhostinttype;
		u_int8_t	compression;
		u_int8_t	nvramtype;
		u_int32_t	nvramsize;
	} hw;

	/* Per logical drive configuration */
	struct ips_ld {
		u_int16_t	userfield;
		u_int8_t	state;		/* IPS_DS_* state */
		u_int8_t	raidcacheparam;
		u_int8_t	chunkcnt;	/* number of member chunks */
		u_int8_t	stripesize;
		u_int8_t	params;
		u_int8_t	__reserved;
		u_int32_t	size;		/* size in sectors, little-endian */

		/* A chunk is a span of sectors on one physical disk */
		struct ips_chunk {
			u_int8_t	channel;
			u_int8_t	target;
			u_int16_t	__reserved;
			u_int32_t	startsec;
			u_int32_t	seccnt;
		} chunk[IPS_MAXCHUNKS];
	} ld[IPS_MAXDRIVES];

	/* Per physical device state, indexed by channel and target */
	struct ips_dev {
		u_int8_t	initiator;
		u_int8_t	params;		/* low bits hold SID_TYPE device type */
		u_int8_t	miscflag;
		u_int8_t	state;		/* IPS_DVS_* flags, 0 if absent */
#define IPS_DVS_STANDBY	0x01
#define IPS_DVS_REBUILD	0x02
#define IPS_DVS_SPARE	0x04
#define IPS_DVS_MEMBER	0x08
#define IPS_DVS_ONLINE	0x80
#define IPS_DVS_READY	(IPS_DVS_STANDBY | IPS_DVS_ONLINE)

		u_int32_t	seccnt;
		u_int8_t	devid[28];
	} dev[IPS_MAXCHANS][IPS_MAXTARGETS];

	u_int8_t	reserved[512];
};
308
/*
 * Rebuild status frame (filled by ips_getrblstat()): per-logical-drive
 * total/remaining sector counts used to compute rebuild progress.
 */
struct ips_rblstat {
	u_int8_t	__unknown[20];
	struct {
		u_int8_t	__unknown[4];
		u_int32_t	total;	/* sectors to rebuild, little-endian */
		u_int32_t	remain;	/* sectors still left, little-endian */
	} ld[IPS_MAXDRIVES];
};
317
/*
 * NVRAM page 5 contents (read via ips_getpg5()): adapter type code and
 * firmware/BIOS revision strings.  "type" indexes ips_names[].
 */
struct ips_pg5 {
	u_int32_t	signature;
	u_int8_t	__reserved1;
	u_int8_t	slot;
	u_int16_t	type;		/* adapter model code, little-endian */
	u_int8_t	bioshi[4];
	u_int8_t	bioslo[4];
	u_int16_t	__reserved2;
	u_int8_t	__reserved3;
	u_int8_t	os;
	u_int8_t	driverhi[4];
	u_int8_t	driverlo[4];
	u_int8_t	__reserved4[100];
};
332
/*
 * Aggregate of all management data frames, kept in one DMA-able buffer
 * (sc_infom) so management commands can target their member directly.
 */
struct ips_info {
	struct ips_adapterinfo	adapter;
	struct ips_driveinfo	drive;
	struct ips_conf		conf;
	struct ips_rblstat	rblstat;
	struct ips_pg5		pg5;
};
340
/*
 * Command control block: per-command driver state.  CCBs cycle through
 * FREE -> QUEUED -> DONE -> FREE; free ones sit on sc_ccbq_free and are
 * handed out through the scsi_iopool (ips_ccb_get/ips_ccb_put).
 */
struct ips_softc;
struct ips_ccb {
	struct ips_softc *	c_sc;		/* driver softc */
	int			c_id;		/* command id */
	int			c_flags;	/* SCSI_* flags */
	enum {
		IPS_CCB_FREE,
		IPS_CCB_QUEUED,
		IPS_CCB_DONE
	}			c_state;	/* command state */

	void *			c_cmdbva;	/* command block virt addr */
	paddr_t			c_cmdbpa;	/* command block phys addr */
	bus_dmamap_t		c_dmam;		/* data buffer DMA map */

	struct scsi_xfer *	c_xfer;		/* corresponding SCSI xfer */

	u_int8_t		c_stat;		/* status byte copy */
	u_int8_t		c_estat;	/* ext status byte copy */
	int			c_error;	/* completion error */

	void			(*c_done)(struct ips_softc *,	/* cmd done */
				    struct ips_ccb *);		/* callback */

	SLIST_ENTRY(ips_ccb)	c_link;		/* queue link */
};
368
369 /* CCB queue */
370 SLIST_HEAD(ips_ccbq, ips_ccb);
371
/*
 * DMA-able chunk of memory: bundles the tag, map, segment and mapped
 * kernel address of one contiguous allocation (see ips_dmamem_alloc()).
 */
struct dmamem {
	bus_dma_tag_t		dm_tag;
	bus_dmamap_t		dm_map;
	bus_dma_segment_t	dm_seg;
	bus_size_t		dm_size;
	void *			dm_vaddr;	/* kernel virtual address */
#define dm_paddr dm_seg.ds_addr			/* physical (bus) address */
};
381
/* Per-controller driver state */
struct ips_softc {
	struct device		sc_dev;		/* base device; must be first
						   (softc is cast from struct device *) */

	struct scsibus_softc *	sc_scsibus;	/* logical drive SCSI bus */

	/* Per-channel SCSI pass-through bus state */
	struct ips_pt {
		struct ips_softc *	pt_sc;		/* back pointer */
		int			pt_chan;	/* channel number */

		int			pt_proctgt;	/* processor/enclosure
							   target, -1 if none */
		char			pt_procdev[16];	/* its device name */
	} sc_pt[IPS_MAXCHANS];

	struct ksensordev	sc_sensordev;	/* drive status sensors */
	struct ksensor *	sc_sensors;	/* one per logical drive */

	bus_space_tag_t		sc_iot;		/* register window */
	bus_space_handle_t	sc_ioh;
	bus_dma_tag_t		sc_dmat;

	const struct ips_chipset *sc_chip;	/* chipset methods, see ips_chips[] */

	struct ips_info *	sc_info;	/* management data (virt) */
	struct dmamem		sc_infom;	/* its DMA memory */

	int			sc_nunits;	/* number of logical drives */

	struct dmamem		sc_cmdbm;	/* command block DMA memory */

	struct ips_ccb *	sc_ccb;		/* CCB array */
	int			sc_nccbs;	/* its size */
	struct ips_ccbq		sc_ccbq_free;	/* free CCB list */
	struct mutex		sc_ccb_mtx;	/* protects the free list —
						   presumably; verify in ips_ccb_get/put */
	struct scsi_iopool	sc_iopool;

	/* Copperhead-only status queue state */
	struct dmamem		sc_sqm;		/* status queue DMA memory */
	paddr_t			sc_sqtail;	/* last seen queue tail */
	u_int32_t *		sc_sqbuf;	/* queue virtual address */
	int			sc_sqidx;	/* current index into queue */
};
422
423 int ips_match(struct device *, void *, void *);
424 void ips_attach(struct device *, struct device *, void *);
425
426 void ips_scsi_cmd(struct scsi_xfer *);
427 void ips_scsi_pt_cmd(struct scsi_xfer *);
428 int ips_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
429
430 #if NBIO > 0
431 int ips_ioctl(struct device *, u_long, caddr_t);
432 int ips_ioctl_inq(struct ips_softc *, struct bioc_inq *);
433 int ips_ioctl_vol(struct ips_softc *, struct bioc_vol *);
434 int ips_ioctl_disk(struct ips_softc *, struct bioc_disk *);
435 int ips_ioctl_setstate(struct ips_softc *, struct bioc_setstate *);
436 #endif
437
438 #ifndef SMALL_KERNEL
439 void ips_sensors(void *);
440 #endif
441
442 int ips_load_xs(struct ips_softc *, struct ips_ccb *, struct scsi_xfer *);
443 void ips_start_xs(struct ips_softc *, struct ips_ccb *, struct scsi_xfer *);
444
445 int ips_cmd(struct ips_softc *, struct ips_ccb *);
446 int ips_poll(struct ips_softc *, struct ips_ccb *);
447 void ips_done(struct ips_softc *, struct ips_ccb *);
448 void ips_done_xs(struct ips_softc *, struct ips_ccb *);
449 void ips_done_pt(struct ips_softc *, struct ips_ccb *);
450 void ips_done_mgmt(struct ips_softc *, struct ips_ccb *);
451 int ips_error(struct ips_softc *, struct ips_ccb *);
452 int ips_error_xs(struct ips_softc *, struct ips_ccb *);
453 int ips_intr(void *);
454 void ips_timeout(void *);
455
456 int ips_getadapterinfo(struct ips_softc *, int);
457 int ips_getdriveinfo(struct ips_softc *, int);
458 int ips_getconf(struct ips_softc *, int);
459 int ips_getpg5(struct ips_softc *, int);
460
461 #if NBIO > 0
462 int ips_getrblstat(struct ips_softc *, int);
463 int ips_setstate(struct ips_softc *, int, int, int, int);
464 int ips_rebuild(struct ips_softc *, int, int, int, int, int);
465 #endif
466
467 void ips_copperhead_exec(struct ips_softc *, struct ips_ccb *);
468 void ips_copperhead_intren(struct ips_softc *);
469 int ips_copperhead_isintr(struct ips_softc *);
470 u_int32_t ips_copperhead_status(struct ips_softc *);
471
472 void ips_morpheus_exec(struct ips_softc *, struct ips_ccb *);
473 void ips_morpheus_intren(struct ips_softc *);
474 int ips_morpheus_isintr(struct ips_softc *);
475 u_int32_t ips_morpheus_status(struct ips_softc *);
476
477 struct ips_ccb *ips_ccb_alloc(struct ips_softc *, int);
478 void ips_ccb_free(struct ips_softc *, struct ips_ccb *, int);
479 void *ips_ccb_get(void *);
480 void ips_ccb_put(void *, void *);
481
482 int ips_dmamem_alloc(struct dmamem *, bus_dma_tag_t, bus_size_t);
483 void ips_dmamem_free(struct dmamem *);
484
/* Autoconf glue: softc size plus match/attach entry points */
const struct cfattach ips_ca = {
	sizeof(struct ips_softc),
	ips_match,
	ips_attach
};
490
/* Device class record: "ips" unit naming, DV_DULL (plain) device class */
struct cfdriver ips_cd = {
	NULL, "ips", DV_DULL
};
494
/* Adapter entry points for the logical drive bus */
static const struct scsi_adapter ips_switch = {
	ips_scsi_cmd, NULL, NULL, NULL, ips_scsi_ioctl
};
498
/* Adapter entry points for the per-channel pass-through buses */
static const struct scsi_adapter ips_pt_switch = {
	ips_scsi_pt_cmd, NULL, NULL, NULL, NULL
};
502
/* PCI vendor/product pairs this driver attaches to */
static const struct pci_matchid ips_ids[] = {
	{ PCI_VENDOR_IBM,	PCI_PRODUCT_IBM_SERVERAID },
	{ PCI_VENDOR_IBM,	PCI_PRODUCT_IBM_SERVERAID2 },
	{ PCI_VENDOR_ADP2,	PCI_PRODUCT_ADP2_SERVERAID }
};
508
/*
 * Chipset descriptors, indexed by the IPS_CHIP_* enum (ips_attach()
 * selects one by PCI product id): the PCI BAR holding the register
 * window plus the chipset-specific command and interrupt methods.
 */
static const struct ips_chipset {
	enum {
		IPS_CHIP_COPPERHEAD = 0,
		IPS_CHIP_MORPHEUS
	} ic_id;

	int	ic_bar;				/* PCI BAR config offset */

	void		(*ic_exec)(struct ips_softc *, struct ips_ccb *);
	void		(*ic_intren)(struct ips_softc *);
	int		(*ic_isintr)(struct ips_softc *);
	u_int32_t	(*ic_status)(struct ips_softc *);
} ips_chips[] = {
	{
		IPS_CHIP_COPPERHEAD,
		0x14,
		ips_copperhead_exec,
		ips_copperhead_intren,
		ips_copperhead_isintr,
		ips_copperhead_status
	},
	{
		IPS_CHIP_MORPHEUS,
		0x10,
		ips_morpheus_exec,
		ips_morpheus_intren,
		ips_morpheus_isintr,
		ips_morpheus_status
	}
};
539
/* Convenience wrappers for the chipset method table */
#define ips_exec(s, c)	(s)->sc_chip->ic_exec((s), (c))
#define ips_intren(s)	(s)->sc_chip->ic_intren((s))
#define ips_isintr(s)	(s)->sc_chip->ic_isintr((s))
#define ips_status(s)	(s)->sc_chip->ic_status((s))
544
/*
 * Adapter model names, indexed by the type code from NVRAM page 5
 * (pg5->type, see ips_attach()); NULL entries have no printable name.
 */
static const char *ips_names[] = {
	NULL,
	NULL,
	"II",
	"onboard",
	"onboard",
	"3H",
	"3L",
	"4H",
	"4M",
	"4L",
	"4Mx",
	"4Lx",
	"5i",
	"5i",
	"6M",
	"6i",
	"7t",
	"7k",
	"7M"
};
566
567 int
ips_match(struct device * parent,void * match,void * aux)568 ips_match(struct device *parent, void *match, void *aux)
569 {
570 return (pci_matchbyid(aux, ips_ids,
571 sizeof(ips_ids) / sizeof(ips_ids[0])));
572 }
573
/*
 * Attach the controller: map its registers, allocate DMA memory, query
 * adapter/drive/configuration info, build the CCB pool, hook up the
 * interrupt, attach the logical-drive SCSI bus plus one pass-through
 * bus per channel, and finally register bio(4) and drive sensors.
 */
void
ips_attach(struct device *parent, struct device *self, void *aux)
{
	struct ips_softc *sc = (struct ips_softc *)self;
	struct pci_attach_args *pa = aux;
	struct ips_ccb ccb0;
	struct scsibus_attach_args saa;
	struct ips_adapterinfo *ai;
	struct ips_driveinfo *di;
	struct ips_pg5 *pg5;
	pcireg_t maptype;
	bus_size_t iosize;
	pci_intr_handle_t ih;
	const char *intrstr;
	int type, i;

	sc->sc_dmat = pa->pa_dmat;

	/* Identify chipset: only the original ServeRAID is a Copperhead */
	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_IBM_SERVERAID)
		sc->sc_chip = &ips_chips[IPS_CHIP_COPPERHEAD];
	else
		sc->sc_chip = &ips_chips[IPS_CHIP_MORPHEUS];

	/* Map registers (BAR differs per chipset, window capped at IPS_IOSIZE) */
	maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_chip->ic_bar);
	if (pci_mapreg_map(pa, sc->sc_chip->ic_bar, maptype, 0, &sc->sc_iot,
	    &sc->sc_ioh, NULL, &iosize, IPS_IOSIZE)) {
		printf(": can't map regs\n");
		return;
	}

	/* Allocate command buffer (one command block per possible CCB) */
	if (ips_dmamem_alloc(&sc->sc_cmdbm, sc->sc_dmat,
	    IPS_MAXCMDS * sizeof(struct ips_cmdb))) {
		printf(": can't alloc cmd buffer\n");
		goto fail1;
	}

	/* Allocate info buffer */
	if (ips_dmamem_alloc(&sc->sc_infom, sc->sc_dmat,
	    sizeof(struct ips_info))) {
		printf(": can't alloc info buffer\n");
		goto fail2;
	}
	sc->sc_info = sc->sc_infom.dm_vaddr;
	ai = &sc->sc_info->adapter;
	di = &sc->sc_info->drive;
	pg5 = &sc->sc_info->pg5;

	/* Allocate status queue for the Copperhead chipset */
	if (sc->sc_chip->ic_id == IPS_CHIP_COPPERHEAD) {
		if (ips_dmamem_alloc(&sc->sc_sqm, sc->sc_dmat, IPS_SQSZ)) {
			printf(": can't alloc status queue\n");
			goto fail3;
		}
		sc->sc_sqtail = sc->sc_sqm.dm_paddr;
		sc->sc_sqbuf = sc->sc_sqm.dm_vaddr;
		sc->sc_sqidx = 0;
		/* Program the queue window: start/end, head one entry past tail */
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQS,
		    sc->sc_sqm.dm_paddr);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQE,
		    sc->sc_sqm.dm_paddr + IPS_SQSZ);
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQH,
		    sc->sc_sqm.dm_paddr + sizeof(u_int32_t));
		bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQT,
		    sc->sc_sqm.dm_paddr);
	}

	/*
	 * Bootstrap CCB queue: a single stack-allocated CCB is enough to
	 * issue the polled management commands below, before we know the
	 * controller's real command count.
	 */
	sc->sc_nccbs = 1;
	sc->sc_ccb = &ccb0;
	bzero(&ccb0, sizeof(ccb0));
	ccb0.c_cmdbva = sc->sc_cmdbm.dm_vaddr;
	ccb0.c_cmdbpa = sc->sc_cmdbm.dm_paddr;
	SLIST_INIT(&sc->sc_ccbq_free);
	SLIST_INSERT_HEAD(&sc->sc_ccbq_free, &ccb0, c_link);
	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
	scsi_iopool_init(&sc->sc_iopool, sc, ips_ccb_get, ips_ccb_put);

	/* Get adapter info */
	if (ips_getadapterinfo(sc, SCSI_NOSLEEP)) {
		printf(": can't get adapter info\n");
		goto fail4;
	}

	/* Get logical drives info */
	if (ips_getdriveinfo(sc, SCSI_NOSLEEP)) {
		printf(": can't get ld info\n");
		goto fail4;
	}
	sc->sc_nunits = di->drivecnt;

	/* Get configuration */
	if (ips_getconf(sc, SCSI_NOSLEEP)) {
		printf(": can't get config\n");
		goto fail4;
	}

	/* Read NVRAM page 5 for additional info; failure is non-fatal */
	(void)ips_getpg5(sc, SCSI_NOSLEEP);

	/* Initialize the real CCB queue, sized by the adapter's command count */
	sc->sc_nccbs = ai->cmdcnt;
	if ((sc->sc_ccb = ips_ccb_alloc(sc, sc->sc_nccbs)) == NULL) {
		printf(": can't alloc ccb queue\n");
		goto fail4;
	}
	SLIST_INIT(&sc->sc_ccbq_free);
	for (i = 0; i < sc->sc_nccbs; i++)
		SLIST_INSERT_HEAD(&sc->sc_ccbq_free,
		    &sc->sc_ccb[i], c_link);

	/* Install interrupt handler */
	if (pci_intr_map(pa, &ih)) {
		printf(": can't map interrupt\n");
		goto fail5;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih);
	if (pci_intr_establish(pa->pa_pc, ih, IPL_BIO, ips_intr, sc,
	    sc->sc_dev.dv_xname) == NULL) {
		printf(": can't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		goto fail5;
	}
	printf(": %s\n", intrstr);

	/* Display adapter info */
	printf("%s: ServeRAID", sc->sc_dev.dv_xname);
	type = letoh16(pg5->type);
	if (type < sizeof(ips_names) / sizeof(ips_names[0]) && ips_names[type])
		printf(" %s", ips_names[type]);
	printf(", FW %c%c%c%c%c%c%c", ai->firmware[0], ai->firmware[1],
	    ai->firmware[2], ai->firmware[3], ai->firmware[4], ai->firmware[5],
	    ai->firmware[6]);
	printf(", BIOS %c%c%c%c%c%c%c", ai->bios[0], ai->bios[1], ai->bios[2],
	    ai->bios[3], ai->bios[4], ai->bios[5], ai->bios[6]);
	printf(", %d cmds, %d LD%s", sc->sc_nccbs, sc->sc_nunits,
	    (sc->sc_nunits == 1 ? "" : "s"));
	printf("\n");

	/* Attach the logical-drive bus; CCBs are shared across all units */
	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
	saa.saa_adapter_buswidth = sc->sc_nunits;
	saa.saa_adapter = &ips_switch;
	saa.saa_adapter_softc = sc;
	saa.saa_luns = 8;
	if (sc->sc_nunits > 0)
		saa.saa_openings = sc->sc_nccbs / sc->sc_nunits;
	else
		saa.saa_openings = 0;
	saa.saa_pool = &sc->sc_iopool;
	saa.saa_quirks = saa.saa_flags = 0;
	saa.saa_wwpn = saa.saa_wwnn = 0;

	sc->sc_scsibus = (struct scsibus_softc *)config_found(self, &saa,
	    scsiprint);

	/* For each channel attach SCSI pass-through bus */
	for (i = 0; i < IPS_MAXCHANS; i++) {
		struct ips_pt *pt;
		int target, lastarget;

		pt = &sc->sc_pt[i];
		pt->pt_sc = sc;
		pt->pt_chan = i;
		pt->pt_proctgt = -1;

		/* Check if channel has any devices besides disks */
		for (target = 0, lastarget = -1; target < IPS_MAXTARGETS;
		    target++) {
			struct ips_dev *idev;
			int type;	/* NOTE(review): shadows outer "type" */

			idev = &sc->sc_info->conf.dev[i][target];
			type = idev->params & SID_TYPE;
			if (idev->state && type != T_DIRECT) {
				lastarget = target;
				if (type == T_PROCESSOR ||
				    type == T_ENCLOSURE)
					/* remember enclosure address */
					pt->pt_proctgt = target;
			}
		}
		/* Skip channels with nothing but disks (handled as LDs) */
		if (lastarget == -1)
			continue;

		saa.saa_adapter = &ips_pt_switch;
		saa.saa_adapter_softc = pt;
		saa.saa_adapter_buswidth = lastarget + 1;
		saa.saa_adapter_target = IPS_MAXTARGETS;
		saa.saa_luns = 8;
		saa.saa_openings = 1;
		saa.saa_pool = &sc->sc_iopool;
		saa.saa_quirks = saa.saa_flags = 0;
		saa.saa_wwpn = saa.saa_wwnn = 0;

		config_found(self, &saa, scsiprint);
	}

	/* Enable interrupts */
	ips_intren(sc);

#if NBIO > 0
	/* Install ioctl handler */
	if (bio_register(&sc->sc_dev, ips_ioctl))
		printf("%s: no ioctl support\n", sc->sc_dev.dv_xname);
#endif

#ifndef SMALL_KERNEL
	/* Add sensors; failures here leave the device attached without them */
	if ((sc->sc_sensors = mallocarray(sc->sc_nunits, sizeof(struct ksensor),
	    M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) {
		printf(": can't alloc sensors\n");
		return;
	}
	strlcpy(sc->sc_sensordev.xname, sc->sc_dev.dv_xname,
	    sizeof(sc->sc_sensordev.xname));
	for (i = 0; i < sc->sc_nunits; i++) {
		struct device *dev;

		sc->sc_sensors[i].type = SENSOR_DRIVE;
		sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
		dev = scsi_get_link(sc->sc_scsibus, i, 0)->device_softc;
		strlcpy(sc->sc_sensors[i].desc, dev->dv_xname,
		    sizeof(sc->sc_sensors[i].desc));
		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
	}
	if (sensor_task_register(sc, ips_sensors, 10) == NULL) {
		printf(": no sensors support\n");
		free(sc->sc_sensors, M_DEVBUF,
		    sc->sc_nunits * sizeof(struct ksensor));
		return;
	}
	sensordev_install(&sc->sc_sensordev);
#endif	/* !SMALL_KERNEL */

	return;
fail5:
	ips_ccb_free(sc, sc->sc_ccb, sc->sc_nccbs);
fail4:
	if (sc->sc_chip->ic_id == IPS_CHIP_COPPERHEAD)
		ips_dmamem_free(&sc->sc_sqm);
fail3:
	ips_dmamem_free(&sc->sc_infom);
fail2:
	ips_dmamem_free(&sc->sc_cmdbm);
fail1:
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, iosize);
}
825
826 void
ips_scsi_cmd(struct scsi_xfer * xs)827 ips_scsi_cmd(struct scsi_xfer *xs)
828 {
829 struct scsi_link *link = xs->sc_link;
830 struct ips_softc *sc = link->bus->sb_adapter_softc;
831 struct ips_driveinfo *di = &sc->sc_info->drive;
832 struct ips_drive *drive;
833 struct scsi_inquiry_data inq;
834 struct scsi_read_cap_data rcd;
835 struct scsi_sense_data sd;
836 struct scsi_rw *rw;
837 struct scsi_rw_10 *rw10;
838 struct ips_ccb *ccb = xs->io;
839 struct ips_cmd *cmd;
840 int target = link->target;
841 u_int32_t blkno, blkcnt;
842 int code;
843
844 DPRINTF(IPS_D_XFER, ("%s: ips_scsi_cmd: xs %p, target %d, "
845 "opcode 0x%02x, flags 0x%x\n", sc->sc_dev.dv_xname, xs, target,
846 xs->cmd.opcode, xs->flags));
847
848 if (target >= sc->sc_nunits || link->lun != 0) {
849 DPRINTF(IPS_D_INFO, ("%s: ips_scsi_cmd: invalid params "
850 "target %d, lun %d\n", sc->sc_dev.dv_xname,
851 target, link->lun));
852 xs->error = XS_DRIVER_STUFFUP;
853 scsi_done(xs);
854 return;
855 }
856
857 drive = &di->drive[target];
858 xs->error = XS_NOERROR;
859
860 /* Fake SCSI commands */
861 switch (xs->cmd.opcode) {
862 case READ_10:
863 case READ_COMMAND:
864 case WRITE_10:
865 case WRITE_COMMAND:
866 if (xs->cmdlen == sizeof(struct scsi_rw)) {
867 rw = (void *)&xs->cmd;
868 blkno = _3btol(rw->addr) &
869 (SRW_TOPADDR << 16 | 0xffff);
870 blkcnt = rw->length ? rw->length : 0x100;
871 } else {
872 rw10 = (void *)&xs->cmd;
873 blkno = _4btol(rw10->addr);
874 blkcnt = _2btol(rw10->length);
875 }
876
877 if (blkno >= letoh32(drive->seccnt) || blkno + blkcnt >
878 letoh32(drive->seccnt)) {
879 DPRINTF(IPS_D_ERR, ("%s: ips_scsi_cmd: invalid params "
880 "blkno %u, blkcnt %u\n", sc->sc_dev.dv_xname,
881 blkno, blkcnt));
882 xs->error = XS_DRIVER_STUFFUP;
883 break;
884 }
885
886 if (xs->flags & SCSI_DATA_IN)
887 code = IPS_CMD_READ;
888 else
889 code = IPS_CMD_WRITE;
890
891 ccb = xs->io;
892
893 cmd = ccb->c_cmdbva;
894 cmd->code = code;
895 cmd->drive = target;
896 cmd->lba = htole32(blkno);
897 cmd->seccnt = htole16(blkcnt);
898
899 if (ips_load_xs(sc, ccb, xs)) {
900 DPRINTF(IPS_D_ERR, ("%s: ips_scsi_cmd: ips_load_xs "
901 "failed\n", sc->sc_dev.dv_xname));
902 xs->error = XS_DRIVER_STUFFUP;
903 scsi_done(xs);
904 return;
905 }
906
907 if (cmd->sgcnt > 0)
908 cmd->code |= IPS_CMD_SG;
909
910 ccb->c_done = ips_done_xs;
911 ips_start_xs(sc, ccb, xs);
912 return;
913 case INQUIRY:
914 bzero(&inq, sizeof(inq));
915 inq.device = T_DIRECT;
916 inq.version = SCSI_REV_2;
917 inq.response_format = SID_SCSI2_RESPONSE;
918 inq.additional_length = SID_SCSI2_ALEN;
919 inq.flags |= SID_CmdQue;
920 strlcpy(inq.vendor, "IBM", sizeof(inq.vendor));
921 snprintf(inq.product, sizeof(inq.product),
922 "LD%d RAID%d", target, drive->raid);
923 strlcpy(inq.revision, "1.0", sizeof(inq.revision));
924 scsi_copy_internal_data(xs, &inq, sizeof(inq));
925 break;
926 case READ_CAPACITY:
927 bzero(&rcd, sizeof(rcd));
928 _lto4b(letoh32(drive->seccnt) - 1, rcd.addr);
929 _lto4b(IPS_SECSZ, rcd.length);
930 scsi_copy_internal_data(xs, &rcd, sizeof(rcd));
931 break;
932 case REQUEST_SENSE:
933 bzero(&sd, sizeof(sd));
934 sd.error_code = SSD_ERRCODE_CURRENT;
935 sd.flags = SKEY_NO_SENSE;
936 scsi_copy_internal_data(xs, &sd, sizeof(sd));
937 break;
938 case SYNCHRONIZE_CACHE:
939 cmd = ccb->c_cmdbva;
940 cmd->code = IPS_CMD_FLUSH;
941
942 ccb->c_done = ips_done_xs;
943 ips_start_xs(sc, ccb, xs);
944 return;
945 case PREVENT_ALLOW:
946 case START_STOP:
947 case TEST_UNIT_READY:
948 break;
949 default:
950 DPRINTF(IPS_D_INFO, ("%s: unsupported scsi command 0x%02x\n",
951 sc->sc_dev.dv_xname, xs->cmd.opcode));
952 xs->error = XS_DRIVER_STUFFUP;
953 }
954
955 scsi_done(xs);
956 }
957
/*
 * Execute a SCSI command on a pass-through (physical) channel by
 * wrapping the raw CDB in a direct CDB (DCDB) controller command.
 */
void
ips_scsi_pt_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct ips_pt *pt = link->bus->sb_adapter_softc;
	struct ips_softc *sc = pt->pt_sc;
	struct device *dev = link->device_softc;
	struct ips_ccb *ccb = xs->io;
	struct ips_cmdb *cmdb;
	struct ips_cmd *cmd;
	struct ips_dcdb *dcdb;
	int chan = pt->pt_chan, target = link->target;

	DPRINTF(IPS_D_XFER, ("%s: ips_scsi_pt_cmd: xs %p, chan %d, target %d, "
	    "opcode 0x%02x, flags 0x%x\n", sc->sc_dev.dv_xname, xs, chan,
	    target, xs->cmd.opcode, xs->flags));

	/* Record the device name attached at the processor/enclosure target */
	if (pt->pt_procdev[0] == '\0' && target == pt->pt_proctgt && dev)
		strlcpy(pt->pt_procdev, dev->dv_xname, sizeof(pt->pt_procdev));

	/* CDBs longer than the DCDB frame can hold: fail with ILLEGAL REQUEST */
	if (xs->cmdlen > IPS_MAXCDB) {
		DPRINTF(IPS_D_ERR, ("%s: cmdlen %d too big\n",
		    sc->sc_dev.dv_xname, xs->cmdlen));

		bzero(&xs->sense, sizeof(xs->sense));
		xs->sense.error_code = SSD_ERRCODE_VALID | SSD_ERRCODE_CURRENT;
		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.add_sense_code = 0x20;	/* illcmd, 0x24 illfield */
		xs->error = XS_SENSE;
		scsi_done(xs);
		return;
	}

	xs->error = XS_NOERROR;

	cmdb = ccb->c_cmdbva;
	cmd = &cmdb->cmd;
	dcdb = &cmdb->dcdb;

	cmd->code = IPS_CMD_DCDB;

	/* Device address: channel in the high nibble, target in the low */
	dcdb->device = (chan << 4) | target;
	if (xs->flags & SCSI_DATA_IN)
		dcdb->attr |= IPS_DCDB_DATAIN;
	if (xs->flags & SCSI_DATA_OUT)
		dcdb->attr |= IPS_DCDB_DATAOUT;

	/*
	 * Adjust timeout value to what controller supports. Make sure our
	 * timeout will be fired after controller gives up.
	 */
	if (xs->timeout <= 10000) {
		dcdb->attr |= IPS_DCDB_TIMO10;
		xs->timeout = 11000;
	} else if (xs->timeout <= 60000) {
		dcdb->attr |= IPS_DCDB_TIMO60;
		xs->timeout = 61000;
	} else {
		dcdb->attr |= IPS_DCDB_TIMO20M;
		xs->timeout = 20 * 60000 + 1000;
	}

	dcdb->attr |= IPS_DCDB_DISCON;
	dcdb->datalen = htole16(xs->datalen);
	dcdb->cdblen = xs->cmdlen;
	dcdb->senselen = MIN(sizeof(xs->sense), sizeof(dcdb->sense));
	memcpy(dcdb->cdb, &xs->cmd, xs->cmdlen);

	if (ips_load_xs(sc, ccb, xs)) {
		DPRINTF(IPS_D_ERR, ("%s: ips_scsi_pt_cmd: ips_load_xs "
		    "failed\n", sc->sc_dev.dv_xname));
		xs->error = XS_DRIVER_STUFFUP;
		scsi_done(xs);
		return;
	}
	if (cmd->sgcnt > 0)
		cmd->code |= IPS_CMD_SG;
	/*
	 * ips_load_xs() put the data buffer's S/G info into the command
	 * frame; for a DCDB command it belongs in the DCDB frame, and the
	 * command's sgaddr must instead point at the DCDB frame itself.
	 */
	dcdb->sgaddr = cmd->sgaddr;
	dcdb->sgcnt = cmd->sgcnt;
	cmd->sgaddr = htole32(ccb->c_cmdbpa + offsetof(struct ips_cmdb, dcdb));
	cmd->sgcnt = 0;

	ccb->c_done = ips_done_pt;
	ips_start_xs(sc, ccb, xs);
}
1043
1044 int
ips_scsi_ioctl(struct scsi_link * link,u_long cmd,caddr_t addr,int flag)1045 ips_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
1046 {
1047 #if NBIO > 0
1048 return (ips_ioctl(link->bus->sb_adapter_softc, cmd, addr));
1049 #else
1050 return (ENOTTY);
1051 #endif
1052 }
1053
1054 #if NBIO > 0
1055 int
ips_ioctl(struct device * dev,u_long cmd,caddr_t addr)1056 ips_ioctl(struct device *dev, u_long cmd, caddr_t addr)
1057 {
1058 struct ips_softc *sc = (struct ips_softc *)dev;
1059
1060 DPRINTF(IPS_D_INFO, ("%s: ips_ioctl: cmd %lu\n",
1061 sc->sc_dev.dv_xname, cmd));
1062
1063 switch (cmd) {
1064 case BIOCINQ:
1065 return (ips_ioctl_inq(sc, (struct bioc_inq *)addr));
1066 case BIOCVOL:
1067 return (ips_ioctl_vol(sc, (struct bioc_vol *)addr));
1068 case BIOCDISK:
1069 return (ips_ioctl_disk(sc, (struct bioc_disk *)addr));
1070 case BIOCSETSTATE:
1071 return (ips_ioctl_setstate(sc, (struct bioc_setstate *)addr));
1072 default:
1073 return (ENOTTY);
1074 }
1075 }
1076
1077 int
ips_ioctl_inq(struct ips_softc * sc,struct bioc_inq * bi)1078 ips_ioctl_inq(struct ips_softc *sc, struct bioc_inq *bi)
1079 {
1080 struct ips_conf *conf = &sc->sc_info->conf;
1081 int i;
1082
1083 strlcpy(bi->bi_dev, sc->sc_dev.dv_xname, sizeof(bi->bi_dev));
1084 bi->bi_novol = sc->sc_nunits;
1085 for (i = 0, bi->bi_nodisk = 0; i < sc->sc_nunits; i++)
1086 bi->bi_nodisk += conf->ld[i].chunkcnt;
1087
1088 DPRINTF(IPS_D_INFO, ("%s: ips_ioctl_inq: novol %d, nodisk %d\n",
1089 bi->bi_dev, bi->bi_novol, bi->bi_nodisk));
1090
1091 return (0);
1092 }
1093
/*
 * BIOCVOL: report status, rebuild progress, size, RAID level and disk
 * count for the logical volume selected by bv->bv_volid.
 */
int
ips_ioctl_vol(struct ips_softc *sc, struct bioc_vol *bv)
{
	struct ips_driveinfo *di = &sc->sc_info->drive;
	struct ips_conf *conf = &sc->sc_info->conf;
	struct ips_rblstat *rblstat = &sc->sc_info->rblstat;
	struct ips_ld *ld;
	int vid = bv->bv_volid;
	struct device *dv;
	int error, rebuild = 0;
	u_int32_t total = 0, done = 0;

	if (vid >= sc->sc_nunits)
		return (EINVAL);
	/* Refresh the cached controller configuration. */
	if ((error = ips_getconf(sc, 0)))
		return (error);
	ld = &conf->ld[vid];

	/* Map the logical drive state onto a bioc volume status. */
	switch (ld->state) {
	case IPS_DS_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case IPS_DS_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		/* remember to query rebuild progress below */
		rebuild++;
		break;
	case IPS_DS_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	default:
		bv->bv_status = BIOC_SVINVALID;
	}

	/* A degraded volume with sectors still remaining is rebuilding. */
	if (rebuild && ips_getrblstat(sc, 0) == 0) {
		total = letoh32(rblstat->ld[vid].total);
		done = total - letoh32(rblstat->ld[vid].remain);
		if (total && total > done) {
			bv->bv_status = BIOC_SVREBUILD;
			bv->bv_percent = 100 * done / total;
		}
	}

	bv->bv_size = (uint64_t)letoh32(ld->size) * IPS_SECSZ;
	bv->bv_level = di->drive[vid].raid;
	bv->bv_nodisk = ld->chunkcnt;

	/* Associate all unused and spare drives with first volume */
	if (vid == 0) {
		struct ips_dev *dev;
		int chan, target;

		for (chan = 0; chan < IPS_MAXCHANS; chan++)
			for (target = 0; target < IPS_MAXTARGETS; target++) {
				/*
				 * Count present, non-member, direct access
				 * devices (unused disks and hot spares).
				 */
				dev = &conf->dev[chan][target];
				if (dev->state && !(dev->state &
				    IPS_DVS_MEMBER) &&
				    (dev->params & SID_TYPE) == T_DIRECT)
					bv->bv_nodisk++;
			}
	}

	dv = scsi_get_link(sc->sc_scsibus, vid, 0)->device_softc;
	strlcpy(bv->bv_dev, dv->dv_xname, sizeof(bv->bv_dev));
	strlcpy(bv->bv_vendor, "IBM", sizeof(bv->bv_vendor));

	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl_vol: vid %d, state 0x%02x, "
	    "total %u, done %u, size %llu, level %d, nodisk %d, dev %s\n",
	    sc->sc_dev.dv_xname, vid, ld->state, total, done, bv->bv_size,
	    bv->bv_level, bv->bv_nodisk, bv->bv_dev));

	return (0);
}
1166
/*
 * BIOCDISK: report channel/target location, size and status of one
 * member disk of a logical volume.  Disk ids beyond the volume's chunk
 * count refer to unused and spare drives, all of which are associated
 * with volume 0 (see ips_ioctl_vol()).
 */
int
ips_ioctl_disk(struct ips_softc *sc, struct bioc_disk *bd)
{
	struct ips_conf *conf = &sc->sc_info->conf;
	struct ips_ld *ld;
	struct ips_chunk *chunk;
	struct ips_dev *dev;
	int vid = bd->bd_volid, did = bd->bd_diskid;
	int chan, target, error, i;

	if (vid >= sc->sc_nunits)
		return (EINVAL);
	/* Refresh the cached controller configuration. */
	if ((error = ips_getconf(sc, 0)))
		return (error);
	ld = &conf->ld[vid];

	if (did >= ld->chunkcnt) {
		/* Probably unused or spare drives */
		if (vid != 0)
			return (EINVAL);

		/*
		 * Walk all channels and targets, counting present,
		 * non-member, direct access devices until the did'th is
		 * reached.  If the loops run to completion nothing
		 * matched: chan ends up == IPS_MAXCHANS and the range
		 * check at "out" returns EINVAL.
		 */
		i = ld->chunkcnt;
		for (chan = 0; chan < IPS_MAXCHANS; chan++)
			for (target = 0; target < IPS_MAXTARGETS; target++) {
				dev = &conf->dev[chan][target];
				if (dev->state && !(dev->state &
				    IPS_DVS_MEMBER) &&
				    (dev->params & SID_TYPE) == T_DIRECT)
					if (i++ == did)
						goto out;
			}
	} else {
		/* Regular volume member: location comes from the chunk. */
		chunk = &ld->chunk[did];
		chan = chunk->channel;
		target = chunk->target;
	}

out:
	if (chan >= IPS_MAXCHANS || target >= IPS_MAXTARGETS)
		return (EINVAL);
	dev = &conf->dev[chan][target];

	bd->bd_channel = chan;
	bd->bd_target = target;
	bd->bd_lun = 0;
	bd->bd_size = (uint64_t)letoh32(dev->seccnt) * IPS_SECSZ;

	bzero(bd->bd_vendor, sizeof(bd->bd_vendor));
	memcpy(bd->bd_vendor, dev->devid, MIN(sizeof(bd->bd_vendor),
	    sizeof(dev->devid)));
	strlcpy(bd->bd_procdev, sc->sc_pt[chan].pt_procdev,
	    sizeof(bd->bd_procdev));

	/*
	 * Translate the device state bits into a bioc disk status;
	 * later tests deliberately override earlier ones.
	 */
	if (dev->state & IPS_DVS_READY) {
		bd->bd_status = BIOC_SDUNUSED;
		if (dev->state & IPS_DVS_MEMBER)
			bd->bd_status = BIOC_SDONLINE;
		if (dev->state & IPS_DVS_SPARE)
			bd->bd_status = BIOC_SDHOTSPARE;
		if (dev->state & IPS_DVS_REBUILD)
			bd->bd_status = BIOC_SDREBUILD;
	} else {
		bd->bd_status = BIOC_SDOFFLINE;
	}

	DPRINTF(IPS_D_INFO, ("%s: ips_ioctl_disk: vid %d, did %d, channel %d, "
	    "target %d, size %llu, state 0x%02x\n", sc->sc_dev.dv_xname,
	    vid, did, bd->bd_channel, bd->bd_target, bd->bd_size, dev->state));

	return (0);
}
1238
1239 int
ips_ioctl_setstate(struct ips_softc * sc,struct bioc_setstate * bs)1240 ips_ioctl_setstate(struct ips_softc *sc, struct bioc_setstate *bs)
1241 {
1242 struct ips_conf *conf = &sc->sc_info->conf;
1243 struct ips_dev *dev;
1244 int state, error;
1245
1246 if (bs->bs_channel >= IPS_MAXCHANS || bs->bs_target >= IPS_MAXTARGETS)
1247 return (EINVAL);
1248 if ((error = ips_getconf(sc, 0)))
1249 return (error);
1250 dev = &conf->dev[bs->bs_channel][bs->bs_target];
1251 state = dev->state;
1252
1253 switch (bs->bs_status) {
1254 case BIOC_SSONLINE:
1255 state |= IPS_DVS_READY;
1256 break;
1257 case BIOC_SSOFFLINE:
1258 state &= ~IPS_DVS_READY;
1259 break;
1260 case BIOC_SSHOTSPARE:
1261 state |= IPS_DVS_SPARE;
1262 break;
1263 case BIOC_SSREBUILD:
1264 return (ips_rebuild(sc, bs->bs_channel, bs->bs_target,
1265 bs->bs_channel, bs->bs_target, 0));
1266 default:
1267 return (EINVAL);
1268 }
1269
1270 return (ips_setstate(sc, bs->bs_channel, bs->bs_target, state, 0));
1271 }
1272 #endif /* NBIO > 0 */
1273
1274 #ifndef SMALL_KERNEL
1275 void
ips_sensors(void * arg)1276 ips_sensors(void *arg)
1277 {
1278 struct ips_softc *sc = arg;
1279 struct ips_conf *conf = &sc->sc_info->conf;
1280 struct ips_ld *ld;
1281 int i;
1282
1283 /* ips_sensors() runs from work queue thus allowed to sleep */
1284 if (ips_getconf(sc, 0)) {
1285 DPRINTF(IPS_D_ERR, ("%s: ips_sensors: ips_getconf failed\n",
1286 sc->sc_dev.dv_xname));
1287
1288 for (i = 0; i < sc->sc_nunits; i++) {
1289 sc->sc_sensors[i].value = 0;
1290 sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
1291 }
1292 return;
1293 }
1294
1295 DPRINTF(IPS_D_INFO, ("%s: ips_sensors:", sc->sc_dev.dv_xname));
1296 for (i = 0; i < sc->sc_nunits; i++) {
1297 ld = &conf->ld[i];
1298 DPRINTF(IPS_D_INFO, (" ld%d.state 0x%02x", i, ld->state));
1299 switch (ld->state) {
1300 case IPS_DS_ONLINE:
1301 sc->sc_sensors[i].value = SENSOR_DRIVE_ONLINE;
1302 sc->sc_sensors[i].status = SENSOR_S_OK;
1303 break;
1304 case IPS_DS_DEGRADED:
1305 sc->sc_sensors[i].value = SENSOR_DRIVE_PFAIL;
1306 sc->sc_sensors[i].status = SENSOR_S_WARN;
1307 break;
1308 case IPS_DS_OFFLINE:
1309 sc->sc_sensors[i].value = SENSOR_DRIVE_FAIL;
1310 sc->sc_sensors[i].status = SENSOR_S_CRIT;
1311 break;
1312 default:
1313 sc->sc_sensors[i].value = 0;
1314 sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
1315 }
1316 }
1317 DPRINTF(IPS_D_INFO, ("\n"));
1318 }
1319 #endif /* !SMALL_KERNEL */
1320
/*
 * Map the scsi_xfer data buffer for DMA and fill in the command's
 * scatter-gather fields.  Returns 0 on success, 1 on failure.
 */
int
ips_load_xs(struct ips_softc *sc, struct ips_ccb *ccb, struct scsi_xfer *xs)
{
	struct ips_cmdb *cmdb = ccb->c_cmdbva;
	struct ips_cmd *cmd = &cmdb->cmd;
	struct ips_sg *sg = cmdb->sg;
	int nsegs, i;

	/* Nothing to map for data-less commands. */
	if (xs->datalen == 0)
		return (0);

	/* Map data buffer into DMA segments */
	if (bus_dmamap_load(sc->sc_dmat, ccb->c_dmam, xs->data, xs->datalen,
	    NULL, (xs->flags & SCSI_NOSLEEP ? BUS_DMA_NOWAIT : 0)))
		return (1);
	bus_dmamap_sync(sc->sc_dmat, ccb->c_dmam, 0,ccb->c_dmam->dm_mapsize,
	    xs->flags & SCSI_DATA_IN ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	/*
	 * NOTE(review): the map is created with at most IPS_MAXSGS
	 * segments (see ips_ccb_alloc()), so this check should never
	 * fire; if it somehow did, the map would be left loaded.
	 * Presumably dead defensive code -- confirm before changing.
	 */
	if ((nsegs = ccb->c_dmam->dm_nsegs) > IPS_MAXSGS)
		return (1);

	if (nsegs > 1) {
		/* Multiple segments: command points at the S/G array. */
		cmd->sgcnt = nsegs;
		cmd->sgaddr = htole32(ccb->c_cmdbpa + offsetof(struct ips_cmdb,
		    sg));

		/* Fill in scatter-gather array */
		for (i = 0; i < nsegs; i++) {
			sg[i].addr = htole32(ccb->c_dmam->dm_segs[i].ds_addr);
			sg[i].size = htole32(ccb->c_dmam->dm_segs[i].ds_len);
		}
	} else {
		/* Single segment: point the command directly at it. */
		cmd->sgcnt = 0;
		cmd->sgaddr = htole32(ccb->c_dmam->dm_segs[0].ds_addr);
	}

	return (0);
}
1360
1361 void
ips_start_xs(struct ips_softc * sc,struct ips_ccb * ccb,struct scsi_xfer * xs)1362 ips_start_xs(struct ips_softc *sc, struct ips_ccb *ccb, struct scsi_xfer *xs)
1363 {
1364 ccb->c_flags = xs->flags;
1365 ccb->c_xfer = xs;
1366 int ispoll = xs->flags & SCSI_POLL;
1367
1368 if (!ispoll) {
1369 timeout_set(&xs->stimeout, ips_timeout, ccb);
1370 timeout_add_msec(&xs->stimeout, xs->timeout);
1371 }
1372
1373 /*
1374 * Return value not used here because ips_cmd() must complete
1375 * scsi_xfer on any failure and SCSI layer will handle possible
1376 * errors.
1377 */
1378 ips_cmd(sc, ccb);
1379 }
1380
/*
 * Post a command to the controller.  SCSI_POLL commands are completed
 * synchronously via ips_poll(); others complete later from ips_intr().
 * Returns the command's errno for polled commands, 0 otherwise.
 */
int
ips_cmd(struct ips_softc *sc, struct ips_ccb *ccb)
{
	struct ips_cmd *cmd = ccb->c_cmdbva;
	int s, error = 0;

	DPRINTF(IPS_D_XFER, ("%s: ips_cmd: id 0x%02x, flags 0x%x, xs %p, "
	    "code 0x%02x, drive %d, sgcnt %d, lba %d, sgaddr 0x%08x, "
	    "seccnt %d\n", sc->sc_dev.dv_xname, ccb->c_id, ccb->c_flags,
	    ccb->c_xfer, cmd->code, cmd->drive, cmd->sgcnt, letoh32(cmd->lba),
	    letoh32(cmd->sgaddr), letoh16(cmd->seccnt)));

	/* The controller echoes this id back in the completion status. */
	cmd->id = ccb->c_id;

	/* Post command to controller and optionally wait for completion */
	s = splbio();
	ips_exec(sc, ccb);
	/* setting the state after ips_exec() is safe: we hold splbio */
	ccb->c_state = IPS_CCB_QUEUED;
	if (ccb->c_flags & SCSI_POLL)
		error = ips_poll(sc, ccb);
	splx(s);

	return (error);
}
1405
/*
 * Wait for a polled command to complete.  SCSI_NOSLEEP commands
 * busy-wait (up to ~1 second) calling ips_intr() by hand; others sleep
 * until ips_intr() wakes us or the transfer timeout expires.  Must be
 * called at splbio.  Returns the command's errno.
 */
int
ips_poll(struct ips_softc *sc, struct ips_ccb *ccb)
{
	int error, msecs, usecs;

	splassert(IPL_BIO);

	if (ccb->c_flags & SCSI_NOSLEEP) {
		/* busy-wait */
		DPRINTF(IPS_D_XFER, ("%s: ips_poll: busy-wait\n",
		    sc->sc_dev.dv_xname));

		/* poll in 100us steps for up to one second */
		for (usecs = 1000000; usecs > 0; usecs -= 100) {
			delay(100);
			ips_intr(sc);
			if (ccb->c_state == IPS_CCB_DONE)
				break;
		}
	} else {
		/* sleep */
		msecs = ccb->c_xfer ? ccb->c_xfer->timeout : IPS_TIMEOUT;

		DPRINTF(IPS_D_XFER, ("%s: ips_poll: sleep %d ms\n",
		    sc->sc_dev.dv_xname, msecs));
		/* woken by ips_intr() when the command completes */
		tsleep_nsec(ccb, PRIBIO + 1, "ipscmd", MSEC_TO_NSEC(msecs));
	}
	DPRINTF(IPS_D_XFER, ("%s: ips_poll: state %d\n", sc->sc_dev.dv_xname,
	    ccb->c_state));

	if (ccb->c_state != IPS_CCB_DONE)
		/*
		 * Command never completed. Fake hardware status byte
		 * to indicate timeout.
		 */
		ccb->c_stat = IPS_STAT_TIMO;

	ips_done(sc, ccb);
	error = ccb->c_error;

	return (error);
}
1447
1448 void
ips_done(struct ips_softc * sc,struct ips_ccb * ccb)1449 ips_done(struct ips_softc *sc, struct ips_ccb *ccb)
1450 {
1451 splassert(IPL_BIO);
1452
1453 DPRINTF(IPS_D_XFER, ("%s: ips_done: id 0x%02x, flags 0x%x, xs %p\n",
1454 sc->sc_dev.dv_xname, ccb->c_id, ccb->c_flags, ccb->c_xfer));
1455
1456 ccb->c_error = ips_error(sc, ccb);
1457 ccb->c_done(sc, ccb);
1458 }
1459
1460 void
ips_done_xs(struct ips_softc * sc,struct ips_ccb * ccb)1461 ips_done_xs(struct ips_softc *sc, struct ips_ccb *ccb)
1462 {
1463 struct scsi_xfer *xs = ccb->c_xfer;
1464
1465 if (!(xs->flags & SCSI_POLL))
1466 timeout_del(&xs->stimeout);
1467
1468 if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
1469 bus_dmamap_sync(sc->sc_dmat, ccb->c_dmam, 0,
1470 ccb->c_dmam->dm_mapsize, xs->flags & SCSI_DATA_IN ?
1471 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1472 bus_dmamap_unload(sc->sc_dmat, ccb->c_dmam);
1473 }
1474
1475 xs->resid = 0;
1476 xs->error = ips_error_xs(sc, ccb);
1477 scsi_done(xs);
1478 }
1479
/*
 * Completion handler for pass-through (DCDB) commands addressed to
 * physical devices behind the controller.
 */
void
ips_done_pt(struct ips_softc *sc, struct ips_ccb *ccb)
{
	struct scsi_xfer *xs = ccb->c_xfer;
	struct ips_cmdb *cmdb = ccb->c_cmdbva;
	struct ips_dcdb *dcdb = &cmdb->dcdb;
	int done = letoh16(dcdb->datalen);	/* bytes actually moved */

	/* Asynchronous commands had a watchdog armed in ips_start_xs(). */
	if (!(xs->flags & SCSI_POLL))
		timeout_del(&xs->stimeout);

	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
		bus_dmamap_sync(sc->sc_dmat, ccb->c_dmam, 0,
		    ccb->c_dmam->dm_mapsize, xs->flags & SCSI_DATA_IN ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, ccb->c_dmam);
	}

	/* Report a residue on short transfers. */
	if (done && done < xs->datalen)
		xs->resid = xs->datalen - done;
	else
		xs->resid = 0;
	xs->error = ips_error_xs(sc, ccb);
	xs->status = dcdb->status;

	/* Pass sense data up on check conditions. */
	if (xs->error == XS_SENSE)
		memcpy(&xs->sense, dcdb->sense, MIN(sizeof(xs->sense),
		    sizeof(dcdb->sense)));

	if (xs->cmd.opcode == INQUIRY && xs->error == XS_NOERROR) {
		int type = ((struct scsi_inquiry_data *)xs->data)->device &
		    SID_TYPE;

		if (type == T_DIRECT)
			/* mask physical drives */
			/*
			 * NOTE(review): failing the INQUIRY hides RAID
			 * member disks from the SCSI layer; confirm this
			 * is intended rather than marking the device
			 * offline in the returned inquiry data.
			 */
			xs->error = XS_DRIVER_STUFFUP;
	}

	scsi_done(xs);
}
1520
1521 void
ips_done_mgmt(struct ips_softc * sc,struct ips_ccb * ccb)1522 ips_done_mgmt(struct ips_softc *sc, struct ips_ccb *ccb)
1523 {
1524 if (ccb->c_flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
1525 bus_dmamap_sync(sc->sc_dmat, sc->sc_infom.dm_map, 0,
1526 sc->sc_infom.dm_map->dm_mapsize,
1527 ccb->c_flags & SCSI_DATA_IN ? BUS_DMASYNC_POSTREAD :
1528 BUS_DMASYNC_POSTWRITE);
1529 scsi_io_put(&sc->sc_iopool, ccb);
1530 }
1531
/*
 * Translate a completed command's hardware status bytes into an errno.
 * Returns 0 for success and recovered conditions (including DCDB data
 * underruns), a specific errno where one applies, EIO otherwise.
 */
int
ips_error(struct ips_softc *sc, struct ips_ccb *ccb)
{
	struct ips_cmdb *cmdb = ccb->c_cmdbva;
	struct ips_cmd *cmd = &cmdb->cmd;
	struct ips_dcdb *dcdb = &cmdb->dcdb;
	struct scsi_xfer *xs = ccb->c_xfer;
	u_int8_t gsc = IPS_STAT_GSC(ccb->c_stat);

	if (gsc == IPS_STAT_OK)
		return (0);

	/* Dump details of the failed command when debugging. */
	DPRINTF(IPS_D_ERR, ("%s: ips_error: stat 0x%02x, estat 0x%02x, "
	    "cmd code 0x%02x, drive %d, sgcnt %d, lba %u, seccnt %d",
	    sc->sc_dev.dv_xname, ccb->c_stat, ccb->c_estat, cmd->code,
	    cmd->drive, cmd->sgcnt, letoh32(cmd->lba), letoh16(cmd->seccnt)));
	if (cmd->code == IPS_CMD_DCDB || cmd->code == IPS_CMD_DCDB_SG) {
		int i;

		DPRINTF(IPS_D_ERR, (", dcdb device 0x%02x, attr 0x%02x, "
		    "datalen %d, sgcnt %d, status 0x%02x",
		    dcdb->device, dcdb->attr, letoh16(dcdb->datalen),
		    dcdb->sgcnt, dcdb->status));

		DPRINTF(IPS_D_ERR, (", cdb"));
		for (i = 0; i < dcdb->cdblen; i++)
			DPRINTF(IPS_D_ERR, (" %x", dcdb->cdb[i]));
		if (ccb->c_estat == IPS_ESTAT_CKCOND) {
			DPRINTF(IPS_D_ERR, (", sense"));
			for (i = 0; i < dcdb->senselen; i++)
				DPRINTF(IPS_D_ERR, (" %x", dcdb->sense[i]));
		}
	}
	DPRINTF(IPS_D_ERR, ("\n"));

	switch (gsc) {
	case IPS_STAT_RECOV:
		/* recovered by the controller -- not an error */
		return (0);
	case IPS_STAT_INVOP:
	case IPS_STAT_INVCMD:
	case IPS_STAT_INVPARM:
		return (EINVAL);
	case IPS_STAT_BUSY:
		return (EBUSY);
	case IPS_STAT_TIMO:
		return (ETIMEDOUT);
	case IPS_STAT_PDRVERR:
		/* physical drive error: consult the extended status */
		switch (ccb->c_estat) {
		case IPS_ESTAT_SELTIMO:
			return (ENODEV);
		case IPS_ESTAT_OURUN:
			if (xs && letoh16(dcdb->datalen) < xs->datalen)
				/* underrun */
				return (0);
			break;
		case IPS_ESTAT_RECOV:
			return (0);
		}
		break;
	}

	return (EIO);
}
1595
1596 int
ips_error_xs(struct ips_softc * sc,struct ips_ccb * ccb)1597 ips_error_xs(struct ips_softc *sc, struct ips_ccb *ccb)
1598 {
1599 struct ips_cmdb *cmdb = ccb->c_cmdbva;
1600 struct ips_dcdb *dcdb = &cmdb->dcdb;
1601 struct scsi_xfer *xs = ccb->c_xfer;
1602 u_int8_t gsc = IPS_STAT_GSC(ccb->c_stat);
1603
1604 /* Map hardware error codes to SCSI ones */
1605 switch (gsc) {
1606 case IPS_STAT_OK:
1607 case IPS_STAT_RECOV:
1608 return (XS_NOERROR);
1609 case IPS_STAT_BUSY:
1610 return (XS_BUSY);
1611 case IPS_STAT_TIMO:
1612 return (XS_TIMEOUT);
1613 case IPS_STAT_PDRVERR:
1614 switch (ccb->c_estat) {
1615 case IPS_ESTAT_SELTIMO:
1616 return (XS_SELTIMEOUT);
1617 case IPS_ESTAT_OURUN:
1618 if (xs && letoh16(dcdb->datalen) < xs->datalen)
1619 /* underrun */
1620 return (XS_NOERROR);
1621 break;
1622 case IPS_ESTAT_HOSTRST:
1623 case IPS_ESTAT_DEVRST:
1624 return (XS_RESET);
1625 case IPS_ESTAT_RECOV:
1626 return (XS_NOERROR);
1627 case IPS_ESTAT_CKCOND:
1628 return (XS_SENSE);
1629 }
1630 break;
1631 }
1632
1633 return (XS_DRIVER_STUFFUP);
1634 }
1635
/*
 * Interrupt handler: drain completion statuses from the controller and
 * finish the corresponding commands.  Commands being polled (ips_poll()
 * sleeping on the CCB) are woken instead of completed here.  Returns 1
 * if the interrupt was ours, 0 otherwise.
 */
int
ips_intr(void *arg)
{
	struct ips_softc *sc = arg;
	struct ips_ccb *ccb;
	u_int32_t status;
	int id;

	DPRINTF(IPS_D_XFER, ("%s: ips_intr", sc->sc_dev.dv_xname));
	if (!ips_isintr(sc)) {
		DPRINTF(IPS_D_XFER, (": not ours\n"));
		return (0);
	}
	DPRINTF(IPS_D_XFER, ("\n"));

	/* Process completed commands */
	while ((status = ips_status(sc)) != 0xffffffff) {
		DPRINTF(IPS_D_XFER, ("%s: ips_intr: status 0x%08x\n",
		    sc->sc_dev.dv_xname, status));

		/* Drop statuses that do not map to a valid CCB. */
		id = IPS_STAT_ID(status);
		if (id >= sc->sc_nccbs) {
			DPRINTF(IPS_D_ERR, ("%s: ips_intr: invalid id %d\n",
			    sc->sc_dev.dv_xname, id));
			continue;
		}

		/* Drop statuses for CCBs that were never queued. */
		ccb = &sc->sc_ccb[id];
		if (ccb->c_state != IPS_CCB_QUEUED) {
			DPRINTF(IPS_D_ERR, ("%s: ips_intr: cmd 0x%02x not "
			    "queued, state %d, status 0x%08x\n",
			    sc->sc_dev.dv_xname, ccb->c_id, ccb->c_state,
			    status));
			continue;
		}

		/* Record the hardware status bytes for ips_error(). */
		ccb->c_state = IPS_CCB_DONE;
		ccb->c_stat = IPS_STAT_BASIC(status);
		ccb->c_estat = IPS_STAT_EXT(status);

		if (ccb->c_flags & SCSI_POLL) {
			/* ips_poll() finishes the command itself */
			wakeup(ccb);
		} else {
			ips_done(sc, ccb);
		}
	}

	return (1);
}
1685
1686 void
ips_timeout(void * arg)1687 ips_timeout(void *arg)
1688 {
1689 struct ips_ccb *ccb = arg;
1690 struct ips_softc *sc = ccb->c_sc;
1691 struct scsi_xfer *xs = ccb->c_xfer;
1692 int s;
1693
1694 s = splbio();
1695 if (xs)
1696 sc_print_addr(xs->sc_link);
1697 else
1698 printf("%s: ", sc->sc_dev.dv_xname);
1699 printf("timeout\n");
1700
1701 /*
1702 * Command never completed. Fake hardware status byte
1703 * to indicate timeout.
1704 * XXX: need to remove command from controller.
1705 */
1706 ccb->c_stat = IPS_STAT_TIMO;
1707 ips_done(sc, ccb);
1708 splx(s);
1709 }
1710
1711 int
ips_getadapterinfo(struct ips_softc * sc,int flags)1712 ips_getadapterinfo(struct ips_softc *sc, int flags)
1713 {
1714 struct ips_ccb *ccb;
1715 struct ips_cmd *cmd;
1716
1717 ccb = scsi_io_get(&sc->sc_iopool, 0);
1718 if (ccb == NULL)
1719 return (1);
1720
1721 ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
1722 ccb->c_done = ips_done_mgmt;
1723
1724 cmd = ccb->c_cmdbva;
1725 cmd->code = IPS_CMD_GETADAPTERINFO;
1726 cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1727 adapter));
1728
1729 return (ips_cmd(sc, ccb));
1730 }
1731
1732 int
ips_getdriveinfo(struct ips_softc * sc,int flags)1733 ips_getdriveinfo(struct ips_softc *sc, int flags)
1734 {
1735 struct ips_ccb *ccb;
1736 struct ips_cmd *cmd;
1737
1738 ccb = scsi_io_get(&sc->sc_iopool, 0);
1739 if (ccb == NULL)
1740 return (1);
1741
1742 ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
1743 ccb->c_done = ips_done_mgmt;
1744
1745 cmd = ccb->c_cmdbva;
1746 cmd->code = IPS_CMD_GETDRIVEINFO;
1747 cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1748 drive));
1749
1750 return (ips_cmd(sc, ccb));
1751 }
1752
1753 int
ips_getconf(struct ips_softc * sc,int flags)1754 ips_getconf(struct ips_softc *sc, int flags)
1755 {
1756 struct ips_ccb *ccb;
1757 struct ips_cmd *cmd;
1758
1759 ccb = scsi_io_get(&sc->sc_iopool, 0);
1760 if (ccb == NULL)
1761 return (1);
1762
1763 ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
1764 ccb->c_done = ips_done_mgmt;
1765
1766 cmd = ccb->c_cmdbva;
1767 cmd->code = IPS_CMD_READCONF;
1768 cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1769 conf));
1770
1771 return (ips_cmd(sc, ccb));
1772 }
1773
1774 int
ips_getpg5(struct ips_softc * sc,int flags)1775 ips_getpg5(struct ips_softc *sc, int flags)
1776 {
1777 struct ips_ccb *ccb;
1778 struct ips_cmd *cmd;
1779
1780 ccb = scsi_io_get(&sc->sc_iopool, 0);
1781 if (ccb == NULL)
1782 return (1);
1783
1784 ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
1785 ccb->c_done = ips_done_mgmt;
1786
1787 cmd = ccb->c_cmdbva;
1788 cmd->code = IPS_CMD_RWNVRAM;
1789 cmd->drive = 5;
1790 cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1791 pg5));
1792
1793 return (ips_cmd(sc, ccb));
1794 }
1795
1796 #if NBIO > 0
1797 int
ips_getrblstat(struct ips_softc * sc,int flags)1798 ips_getrblstat(struct ips_softc *sc, int flags)
1799 {
1800 struct ips_ccb *ccb;
1801 struct ips_cmd *cmd;
1802
1803 ccb = scsi_io_get(&sc->sc_iopool, 0);
1804 if (ccb == NULL)
1805 return (1);
1806
1807 ccb->c_flags = SCSI_DATA_IN | SCSI_POLL | flags;
1808 ccb->c_done = ips_done_mgmt;
1809
1810 cmd = ccb->c_cmdbva;
1811 cmd->code = IPS_CMD_REBUILDSTATUS;
1812 cmd->sgaddr = htole32(sc->sc_infom.dm_paddr + offsetof(struct ips_info,
1813 rblstat));
1814
1815 return (ips_cmd(sc, ccb));
1816 }
1817
1818 int
ips_setstate(struct ips_softc * sc,int chan,int target,int state,int flags)1819 ips_setstate(struct ips_softc *sc, int chan, int target, int state, int flags)
1820 {
1821 struct ips_ccb *ccb;
1822 struct ips_cmd *cmd;
1823
1824 ccb = scsi_io_get(&sc->sc_iopool, 0);
1825 if (ccb == NULL)
1826 return (1);
1827
1828 ccb->c_flags = SCSI_POLL | flags;
1829 ccb->c_done = ips_done_mgmt;
1830
1831 cmd = ccb->c_cmdbva;
1832 cmd->code = IPS_CMD_SETSTATE;
1833 cmd->drive = chan;
1834 cmd->sgcnt = target;
1835 cmd->seg4g = state;
1836
1837 return (ips_cmd(sc, ccb));
1838 }
1839
1840 int
ips_rebuild(struct ips_softc * sc,int chan,int target,int nchan,int ntarget,int flags)1841 ips_rebuild(struct ips_softc *sc, int chan, int target, int nchan,
1842 int ntarget, int flags)
1843 {
1844 struct ips_ccb *ccb;
1845 struct ips_cmd *cmd;
1846
1847 ccb = scsi_io_get(&sc->sc_iopool, 0);
1848 if (ccb == NULL)
1849 return (1);
1850
1851 ccb->c_flags = SCSI_POLL | flags;
1852 ccb->c_done = ips_done_mgmt;
1853
1854 cmd = ccb->c_cmdbva;
1855 cmd->code = IPS_CMD_REBUILD;
1856 cmd->drive = chan;
1857 cmd->sgcnt = target;
1858 cmd->seccnt = htole16(ntarget << 8 | nchan);
1859
1860 return (ips_cmd(sc, ccb));
1861 }
1862 #endif /* NBIO > 0 */
1863
/*
 * Post a command on a copperhead-class controller: wait (up to ~10 ms)
 * for the semaphore bit to clear, then write the command's physical
 * address and the start bit.
 */
void
ips_copperhead_exec(struct ips_softc *sc, struct ips_ccb *ccb)
{
	u_int32_t reg;
	int timeout;

	/* timeout ends at -1 when the wait is exhausted (checked below) */
	for (timeout = 100; timeout-- > 0; delay(100)) {
		reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_CCC);
		if ((reg & IPS_REG_CCC_SEM) == 0)
			break;
	}
	if (timeout < 0) {
		printf("%s: semaphore timeout\n", sc->sc_dev.dv_xname);
		return;
	}

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_CCSA, ccb->c_cmdbpa);
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, IPS_REG_CCC,
	    IPS_REG_CCC_START);
}
1884
1885 void
ips_copperhead_intren(struct ips_softc * sc)1886 ips_copperhead_intren(struct ips_softc *sc)
1887 {
1888 bus_space_write_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS, IPS_REG_HIS_EN);
1889 }
1890
1891 int
ips_copperhead_isintr(struct ips_softc * sc)1892 ips_copperhead_isintr(struct ips_softc *sc)
1893 {
1894 u_int8_t reg;
1895
1896 reg = bus_space_read_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS);
1897 bus_space_write_1(sc->sc_iot, sc->sc_ioh, IPS_REG_HIS, reg);
1898 if (reg != 0xff && (reg & IPS_REG_HIS_SCE))
1899 return (1);
1900
1901 return (0);
1902 }
1903
/*
 * Fetch the next completion status from the copperhead status queue, a
 * ring of 32-bit entries in host memory.  Returns 0xffffffff when the
 * queue is empty (our tail has caught up with the controller's head).
 */
u_int32_t
ips_copperhead_status(struct ips_softc *sc)
{
	u_int32_t sqhead, sqtail, status;

	sqhead = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQH);
	DPRINTF(IPS_D_XFER, ("%s: sqhead 0x%08x, sqtail 0x%08lx\n",
	    sc->sc_dev.dv_xname, sqhead, sc->sc_sqtail));

	/* Advance the tail by one entry, wrapping at the end of the ring. */
	sqtail = sc->sc_sqtail + sizeof(u_int32_t);
	if (sqtail == sc->sc_sqm.dm_paddr + IPS_SQSZ)
		sqtail = sc->sc_sqm.dm_paddr;
	if (sqtail == sqhead)
		return (0xffffffff);

	sc->sc_sqtail = sqtail;
	/* sc_sqidx mirrors the tail as an index into the mapped buffer */
	if (++sc->sc_sqidx == IPS_MAXCMDS)
		sc->sc_sqidx = 0;
	status = letoh32(sc->sc_sqbuf[sc->sc_sqidx]);
	/* Tell the controller the entry has been consumed. */
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_SQT, sqtail);

	return (status);
}
1927
1928 void
ips_morpheus_exec(struct ips_softc * sc,struct ips_ccb * ccb)1929 ips_morpheus_exec(struct ips_softc *sc, struct ips_ccb *ccb)
1930 {
1931 bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_IQP, ccb->c_cmdbpa);
1932 }
1933
1934 void
ips_morpheus_intren(struct ips_softc * sc)1935 ips_morpheus_intren(struct ips_softc *sc)
1936 {
1937 u_int32_t reg;
1938
1939 reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIM);
1940 reg &= ~IPS_REG_OIM_DS;
1941 bus_space_write_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIM, reg);
1942 }
1943
1944 int
ips_morpheus_isintr(struct ips_softc * sc)1945 ips_morpheus_isintr(struct ips_softc *sc)
1946 {
1947 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OIS) &
1948 IPS_REG_OIS_PEND);
1949 }
1950
1951 u_int32_t
ips_morpheus_status(struct ips_softc * sc)1952 ips_morpheus_status(struct ips_softc *sc)
1953 {
1954 u_int32_t reg;
1955
1956 reg = bus_space_read_4(sc->sc_iot, sc->sc_ioh, IPS_REG_OQP);
1957 DPRINTF(IPS_D_XFER, ("%s: status 0x%08x\n", sc->sc_dev.dv_xname, reg));
1958
1959 return (reg);
1960 }
1961
/*
 * Allocate and initialize an array of n CCBs, carving each CCB's
 * command buffer out of the already-allocated sc_cmdbm DMA memory and
 * creating a per-CCB data DMA map.  Returns NULL on failure, with any
 * partially created maps destroyed.
 */
struct ips_ccb *
ips_ccb_alloc(struct ips_softc *sc, int n)
{
	struct ips_ccb *ccb;
	int i;

	if ((ccb = mallocarray(n, sizeof(*ccb), M_DEVBUF,
	    M_NOWAIT | M_ZERO)) == NULL)
		return (NULL);

	for (i = 0; i < n; i++) {
		ccb[i].c_sc = sc;
		ccb[i].c_id = i;
		/* the i'th slice of the shared command buffer */
		ccb[i].c_cmdbva = (char *)sc->sc_cmdbm.dm_vaddr +
		    i * sizeof(struct ips_cmdb);
		ccb[i].c_cmdbpa = sc->sc_cmdbm.dm_paddr +
		    i * sizeof(struct ips_cmdb);
		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, IPS_MAXSGS,
		    MAXPHYS, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb[i].c_dmam))
			goto fail;
	}

	return (ccb);
fail:
	/* destroy the i maps created so far (indices 0 .. i - 1) */
	for (; i > 0; i--)
		bus_dmamap_destroy(sc->sc_dmat, ccb[i - 1].c_dmam);
	free(ccb, M_DEVBUF, n * sizeof(*ccb));
	return (NULL);
}
1992
1993 void
ips_ccb_free(struct ips_softc * sc,struct ips_ccb * ccb,int n)1994 ips_ccb_free(struct ips_softc *sc, struct ips_ccb *ccb, int n)
1995 {
1996 int i;
1997
1998 for (i = 0; i < n; i++)
1999 bus_dmamap_destroy(sc->sc_dmat, ccb[i - 1].c_dmam);
2000 free(ccb, M_DEVBUF, n * sizeof(*ccb));
2001 }
2002
2003 void *
ips_ccb_get(void * xsc)2004 ips_ccb_get(void *xsc)
2005 {
2006 struct ips_softc *sc = xsc;
2007 struct ips_ccb *ccb;
2008
2009 mtx_enter(&sc->sc_ccb_mtx);
2010 if ((ccb = SLIST_FIRST(&sc->sc_ccbq_free)) != NULL) {
2011 SLIST_REMOVE_HEAD(&sc->sc_ccbq_free, c_link);
2012 ccb->c_flags = 0;
2013 ccb->c_xfer = NULL;
2014 bzero(ccb->c_cmdbva, sizeof(struct ips_cmdb));
2015 }
2016 mtx_leave(&sc->sc_ccb_mtx);
2017
2018 return (ccb);
2019 }
2020
2021 void
ips_ccb_put(void * xsc,void * xccb)2022 ips_ccb_put(void *xsc, void *xccb)
2023 {
2024 struct ips_softc *sc = xsc;
2025 struct ips_ccb *ccb = xccb;
2026
2027 ccb->c_state = IPS_CCB_FREE;
2028 mtx_enter(&sc->sc_ccb_mtx);
2029 SLIST_INSERT_HEAD(&sc->sc_ccbq_free, ccb, c_link);
2030 mtx_leave(&sc->sc_ccb_mtx);
2031 }
2032
/*
 * Allocate size bytes of DMA-safe memory: create a map, allocate and
 * map a single segment, then load it.  Returns 0 on success, 1 on
 * failure with everything torn down again.
 */
int
ips_dmamem_alloc(struct dmamem *dm, bus_dma_tag_t tag, bus_size_t size)
{
	int nsegs;

	dm->dm_tag = tag;
	dm->dm_size = size;

	if (bus_dmamap_create(tag, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &dm->dm_map))
		return (1);
	if (bus_dmamem_alloc(tag, size, 0, 0, &dm->dm_seg, 1, &nsegs,
	    BUS_DMA_NOWAIT))
		goto fail1;
	if (bus_dmamem_map(tag, &dm->dm_seg, 1, size, (caddr_t *)&dm->dm_vaddr,
	    BUS_DMA_NOWAIT))
		goto fail2;
	if (bus_dmamap_load(tag, dm->dm_map, dm->dm_vaddr, size, NULL,
	    BUS_DMA_NOWAIT))
		goto fail3;

	return (0);

	/* unwind in reverse order of the steps above */
fail3:
	bus_dmamem_unmap(tag, dm->dm_vaddr, size);
fail2:
	bus_dmamem_free(tag, &dm->dm_seg, 1);
fail1:
	bus_dmamap_destroy(tag, dm->dm_map);
	return (1);
}
2064
2065 void
ips_dmamem_free(struct dmamem * dm)2066 ips_dmamem_free(struct dmamem *dm)
2067 {
2068 bus_dmamap_unload(dm->dm_tag, dm->dm_map);
2069 bus_dmamem_unmap(dm->dm_tag, dm->dm_vaddr, dm->dm_size);
2070 bus_dmamem_free(dm->dm_tag, &dm->dm_seg, 1);
2071 bus_dmamap_destroy(dm->dm_tag, dm->dm_map);
2072 }
2073