1 /* $OpenBSD: mpii.c,v 1.148 2024/05/24 06:02:58 jsg Exp $ */
2 /*
3 * Copyright (c) 2010, 2012 Mike Belopuhov
4 * Copyright (c) 2009 James Giannoules
5 * Copyright (c) 2005 - 2010 David Gwynne <dlg@openbsd.org>
6 * Copyright (c) 2005 - 2010 Marco Peereboom <marco@openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21 #include "bio.h"
22
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/device.h>
26 #include <sys/malloc.h>
27 #include <sys/sensors.h>
28 #include <sys/dkio.h>
29 #include <sys/task.h>
30
31 #include <machine/bus.h>
32
33 #include <dev/pci/pcireg.h>
34 #include <dev/pci/pcivar.h>
35 #include <dev/pci/pcidevs.h>
36
37 #include <scsi/scsi_all.h>
38 #include <scsi/scsiconf.h>
39
40 #include <dev/biovar.h>
41
42 #include <dev/pci/mpiireg.h>
43
44 /* #define MPII_DEBUG */
45 #ifdef MPII_DEBUG
46 #define DPRINTF(x...) do { if (mpii_debug) printf(x); } while(0)
47 #define DNPRINTF(n,x...) do { if (mpii_debug & (n)) printf(x); } while(0)
48 #define MPII_D_CMD (0x0001)
49 #define MPII_D_INTR (0x0002)
50 #define MPII_D_MISC (0x0004)
51 #define MPII_D_DMA (0x0008)
52 #define MPII_D_IOCTL (0x0010)
53 #define MPII_D_RW (0x0020)
54 #define MPII_D_MEM (0x0040)
55 #define MPII_D_CCB (0x0080)
56 #define MPII_D_PPR (0x0100)
57 #define MPII_D_RAID (0x0200)
58 #define MPII_D_EVT (0x0400)
59 #define MPII_D_CFG (0x0800)
60 #define MPII_D_MAP (0x1000)
61
62 u_int32_t mpii_debug = 0
63 | MPII_D_CMD
64 | MPII_D_INTR
65 | MPII_D_MISC
66 | MPII_D_DMA
67 | MPII_D_IOCTL
68 | MPII_D_RW
69 | MPII_D_MEM
70 | MPII_D_CCB
71 | MPII_D_PPR
72 | MPII_D_RAID
73 | MPII_D_EVT
74 | MPII_D_CFG
75 | MPII_D_MAP
76 ;
77 #else
78 #define DPRINTF(x...)
79 #define DNPRINTF(n,x...)
80 #endif
81
/* size of one request frame slot and the number of frames allocated */
#define MPII_REQUEST_SIZE (512)
#define MPII_REQUEST_CREDIT (128)

/*
 * A single bus_dma(9) allocation: one contiguous segment mapped into
 * kernel virtual memory.  Used for the request frames, reply frames
 * and the reply free/post queues.
 */
struct mpii_dmamem {
	bus_dmamap_t		mdm_map;	/* dma map (holds the dva) */
	bus_dma_segment_t	mdm_seg;	/* backing physical segment */
	size_t			mdm_size;	/* size of the allocation */
	caddr_t			mdm_kva;	/* kernel virtual address */
};
/* accessors: dma map, 64bit device-visible address, kernel va */
#define MPII_DMA_MAP(_mdm) ((_mdm)->mdm_map)
#define MPII_DMA_DVA(_mdm) ((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MPII_DMA_KVA(_mdm) ((void *)(_mdm)->mdm_kva)
94
95 struct mpii_softc;
96
/*
 * Reply control block: tracks one hardware reply frame inside
 * sc_replies by both its kva and its device-visible address.
 */
struct mpii_rcb {
	SIMPLEQ_ENTRY(mpii_rcb)	rcb_link;	/* event/ack queue linkage */
	void			*rcb_reply;	/* kva of the reply frame */
	u_int32_t		rcb_reply_dva;	/* dva handed back on the free queue */
};

SIMPLEQ_HEAD(mpii_rcb_list, mpii_rcb);
104
/*
 * Per-target state, one entry per device handle reported by the IOC.
 * Looked up by scsibus target number through sc_devs[].
 */
struct mpii_device {
	int flags;
#define MPII_DF_ATTACH		(0x0001)	/* NOTE(review): presumably attach in flight — set by event code outside this chunk */
#define MPII_DF_DETACH		(0x0002)	/* NOTE(review): presumably detach in flight */
#define MPII_DF_HIDDEN		(0x0004)	/* not exposed to scsibus (probe fails) */
#define MPII_DF_UNUSED		(0x0008)	/* not exposed to scsibus (probe fails) */
#define MPII_DF_VOLUME		(0x0010)	/* target is a RAID volume */
#define MPII_DF_VOLUME_DISK	(0x0020)	/* member disk of a volume */
#define MPII_DF_HOT_SPARE	(0x0040)	/* configured hot spare */
	short			slot;		/* enclosure slot */
	short			percent;	/* e.g. rebuild progress — TODO confirm */
	u_int16_t		dev_handle;	/* IOC device handle */
	u_int16_t		enclosure;	/* enclosure handle */
	u_int16_t		expander;	/* expander handle */
	u_int8_t		phy_num;
	u_int8_t		physical_port;
};
122
/*
 * Command control block: one per outstanding request frame.  The
 * smid (system message id) is the 1-based index into sc_ccbs; the
 * interrupt handler maps a reply back to its ccb via smid - 1.
 */
struct mpii_ccb {
	struct mpii_softc	*ccb_sc;

	void *			ccb_cookie;	/* caller context (e.g. scsi_xfer) */
	bus_dmamap_t		ccb_dmamap;	/* map for the data transfer */

	bus_addr_t		ccb_offset;	/* frame offset within sc_requests */
	void			*ccb_cmd;	/* kva of the request frame */
	bus_addr_t		ccb_cmd_dva;	/* dva of the request frame */
	u_int16_t		ccb_dev_handle;
	u_int16_t		ccb_smid;	/* system message id, 1-based */

	/* state transitions are protected by sc_ccb_mtx */
	volatile enum {
		MPII_CCB_FREE,		/* available for allocation */
		MPII_CCB_READY,		/* owned by the driver, not on the hw */
		MPII_CCB_QUEUED,	/* submitted to the IOC */
		MPII_CCB_TIMEOUT	/* timed out, queued for recovery */
	} ccb_state;

	void			(*ccb_done)(struct mpii_ccb *);	/* completion callback */
	struct mpii_rcb		*ccb_rcb;	/* reply frame, or NULL */

	SIMPLEQ_ENTRY(mpii_ccb)	ccb_link;
};

SIMPLEQ_HEAD(mpii_ccb_list, mpii_ccb);
149
/*
 * Per-controller softc.
 */
struct mpii_softc {
	struct device		sc_dev;

	/* pci glue */
	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	void			*sc_ih;		/* interrupt handler cookie */

	int			sc_flags;
#define MPII_F_RAID		(1<<1)	/* integrated RAID; enables bio/sensors */
#define MPII_F_SAS3		(1<<2)	/* presumably selects the IEEE SGE path — see mpii_load_xs_sas3 */
#define MPII_F_AERO		(1<<3)	/* SAS38xx/39xx: reads of 0 are retried (mpii_read) */

	struct scsibus_softc	*sc_scsibus;
	unsigned int		sc_pending;	/* config_pending held until first discovery */

	struct mpii_device	**sc_devs;	/* target number -> device, sc_max_devices entries */

	/* register access */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	struct mutex		sc_req_mtx;	/* serializes request submission */
	struct mutex		sc_rep_mtx;	/* serializes reply post queue draining */

	/* geometry negotiated with the IOC (iocfacts) */
	ushort			sc_reply_size;
	ushort			sc_request_size;

	ushort			sc_max_cmds;
	ushort			sc_num_reply_frames;
	u_int			sc_reply_free_qdepth;
	u_int			sc_reply_post_qdepth;

	/* scatter-gather layout: where the chain element sits and limits */
	ushort			sc_chain_sge;
	ushort			sc_max_sgl;
	int			sc_max_chain;

	u_int8_t		sc_ioc_event_replay;	/* soft reset ok (mpii_init) */

	u_int8_t		sc_porttype;
	u_int8_t		sc_max_volumes;
	u_int16_t		sc_max_devices;
	u_int16_t		sc_vd_count;
	u_int16_t		sc_vd_id_low;
	u_int16_t		sc_pd_id_start;
	int			sc_ioc_number;
	u_int8_t		sc_vf_id;

	/* command control blocks and the free list feeding sc_iopool */
	struct mpii_ccb		*sc_ccbs;
	struct mpii_ccb_list	sc_ccb_free;
	struct mutex		sc_ccb_free_mtx;

	/*
	 * sc_ccb_mtx protects the ccb state and list entry between
	 * mpii_scsi_cmd and scsidone.
	 */
	struct mutex		sc_ccb_mtx;

	/* timed-out commands pending abort processing */
	struct mpii_ccb_list	sc_ccb_tmos;
	struct scsi_iohandler	sc_ccb_tmo_handler;

	struct scsi_iopool	sc_iopool;

	struct mpii_dmamem	*sc_requests;	/* request frames */

	struct mpii_dmamem	*sc_replies;	/* reply frames */
	struct mpii_rcb		*sc_rcbs;	/* one rcb per reply frame */

	/* reply post queue: descriptors written by the IOC */
	struct mpii_dmamem	*sc_reply_postq;
	struct mpii_reply_descr	*sc_reply_postq_kva;
	u_int			sc_reply_post_host_index;

	/* reply free queue: dvas of consumed reply frames returned to the IOC */
	struct mpii_dmamem	*sc_reply_freeq;
	u_int			sc_reply_free_host_index;

	/* async SAS topology events, processed from a task */
	struct mpii_rcb_list	sc_evt_sas_queue;
	struct mutex		sc_evt_sas_mtx;
	struct task		sc_evt_sas_task;

	/* events that need an explicit ack sent back to the IOC */
	struct mpii_rcb_list	sc_evt_ack_queue;
	struct mutex		sc_evt_ack_mtx;
	struct scsi_iohandler	sc_evt_ack_handler;

	/* scsi ioctl from sd device */
	int			(*sc_ioctl)(struct device *, u_long, caddr_t);

	/* hardware sensors, one per volume (non-SMALL_KERNEL, bio) */
	int			sc_nsensors;
	struct ksensor		*sc_sensors;
	struct ksensordev	sc_sensordev;
};
241
242 int mpii_match(struct device *, void *, void *);
243 void mpii_attach(struct device *, struct device *, void *);
244 int mpii_detach(struct device *, int);
245
246 int mpii_intr(void *);
247
/* autoconf glue: size, match, attach, detach */
const struct cfattach mpii_ca = {
	sizeof(struct mpii_softc),
	mpii_match,
	mpii_attach,
	mpii_detach
};
254
/* driver definition; DV_DULL as the scsibus child does the real work */
struct cfdriver mpii_cd = {
	NULL,
	"mpii",
	DV_DULL
};
260
261 void mpii_scsi_cmd(struct scsi_xfer *);
262 void mpii_scsi_cmd_done(struct mpii_ccb *);
263 int mpii_scsi_probe(struct scsi_link *);
264 int mpii_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
265
/*
 * Midlayer entry points: command submission, target probe and ioctl.
 * No minphys or device-free hooks are provided.
 */
const struct scsi_adapter mpii_switch = {
	mpii_scsi_cmd, NULL, mpii_scsi_probe, NULL, mpii_scsi_ioctl
};
269
270 struct mpii_dmamem *
271 mpii_dmamem_alloc(struct mpii_softc *, size_t);
272 void mpii_dmamem_free(struct mpii_softc *,
273 struct mpii_dmamem *);
274 int mpii_alloc_ccbs(struct mpii_softc *);
275 void * mpii_get_ccb(void *);
276 void mpii_put_ccb(void *, void *);
277 int mpii_alloc_replies(struct mpii_softc *);
278 int mpii_alloc_queues(struct mpii_softc *);
279 void mpii_push_reply(struct mpii_softc *, struct mpii_rcb *);
280 void mpii_push_replies(struct mpii_softc *);
281
282 void mpii_scsi_cmd_tmo(void *);
283 void mpii_scsi_cmd_tmo_handler(void *, void *);
284 void mpii_scsi_cmd_tmo_done(struct mpii_ccb *);
285
286 int mpii_insert_dev(struct mpii_softc *, struct mpii_device *);
287 int mpii_remove_dev(struct mpii_softc *, struct mpii_device *);
288 struct mpii_device *
289 mpii_find_dev(struct mpii_softc *, u_int16_t);
290
291 void mpii_start(struct mpii_softc *, struct mpii_ccb *);
292 int mpii_poll(struct mpii_softc *, struct mpii_ccb *);
293 void mpii_poll_done(struct mpii_ccb *);
294 struct mpii_rcb *
295 mpii_reply(struct mpii_softc *, struct mpii_reply_descr *);
296
297 void mpii_wait(struct mpii_softc *, struct mpii_ccb *);
298 void mpii_wait_done(struct mpii_ccb *);
299
300 void mpii_init_queues(struct mpii_softc *);
301
302 int mpii_load_xs(struct mpii_ccb *);
303 int mpii_load_xs_sas3(struct mpii_ccb *);
304
305 u_int32_t mpii_read(struct mpii_softc *, bus_size_t);
306 void mpii_write(struct mpii_softc *, bus_size_t, u_int32_t);
307 int mpii_wait_eq(struct mpii_softc *, bus_size_t, u_int32_t,
308 u_int32_t);
309 int mpii_wait_ne(struct mpii_softc *, bus_size_t, u_int32_t,
310 u_int32_t);
311
312 int mpii_init(struct mpii_softc *);
313 int mpii_reset_soft(struct mpii_softc *);
314 int mpii_reset_hard(struct mpii_softc *);
315
316 int mpii_handshake_send(struct mpii_softc *, void *, size_t);
317 int mpii_handshake_recv_dword(struct mpii_softc *,
318 u_int32_t *);
319 int mpii_handshake_recv(struct mpii_softc *, void *, size_t);
320
321 void mpii_empty_done(struct mpii_ccb *);
322
323 int mpii_iocinit(struct mpii_softc *);
324 int mpii_iocfacts(struct mpii_softc *);
325 int mpii_portfacts(struct mpii_softc *);
326 int mpii_portenable(struct mpii_softc *);
327 int mpii_cfg_coalescing(struct mpii_softc *);
328 int mpii_board_info(struct mpii_softc *);
329 int mpii_target_map(struct mpii_softc *);
330
331 int mpii_eventnotify(struct mpii_softc *);
332 void mpii_eventnotify_done(struct mpii_ccb *);
333 void mpii_eventack(void *, void *);
334 void mpii_eventack_done(struct mpii_ccb *);
335 void mpii_event_process(struct mpii_softc *, struct mpii_rcb *);
336 void mpii_event_done(struct mpii_softc *, struct mpii_rcb *);
337 void mpii_event_sas(void *);
338 void mpii_event_raid(struct mpii_softc *,
339 struct mpii_msg_event_reply *);
340 void mpii_event_discovery(struct mpii_softc *,
341 struct mpii_msg_event_reply *);
342
343 void mpii_sas_remove_device(struct mpii_softc *, u_int16_t);
344
345 int mpii_req_cfg_header(struct mpii_softc *, u_int8_t,
346 u_int8_t, u_int32_t, int, void *);
347 int mpii_req_cfg_page(struct mpii_softc *, u_int32_t, int,
348 void *, int, void *, size_t);
349
350 int mpii_ioctl_cache(struct scsi_link *, u_long, struct dk_cache *);
351
352 #if NBIO > 0
353 int mpii_ioctl(struct device *, u_long, caddr_t);
354 int mpii_ioctl_inq(struct mpii_softc *, struct bioc_inq *);
355 int mpii_ioctl_vol(struct mpii_softc *, struct bioc_vol *);
356 int mpii_ioctl_disk(struct mpii_softc *, struct bioc_disk *);
357 int mpii_bio_hs(struct mpii_softc *, struct bioc_disk *, int,
358 int, int *);
359 int mpii_bio_disk(struct mpii_softc *, struct bioc_disk *,
360 u_int8_t);
361 struct mpii_device *
362 mpii_find_vol(struct mpii_softc *, int);
363 #ifndef SMALL_KERNEL
364 int mpii_bio_volstate(struct mpii_softc *, struct bioc_vol *);
365 int mpii_create_sensors(struct mpii_softc *);
366 void mpii_refresh_sensors(void *);
367 #endif /* SMALL_KERNEL */
368 #endif /* NBIO > 0 */
369
/* device name for printfs */
#define DEVNAME(s) ((s)->sc_dev.dv_xname)

/* size of an object expressed in 32-bit dwords */
#define dwordsof(s) (sizeof(s) / sizeof(u_int32_t))

/* doorbell and interrupt status register shorthands */
#define mpii_read_db(s) mpii_read((s), MPII_DOORBELL)
#define mpii_write_db(s, v) mpii_write((s), MPII_DOORBELL, (v))
#define mpii_read_intr(s) mpii_read((s), MPII_INTR_STATUS)
#define mpii_write_intr(s, v) mpii_write((s), MPII_INTR_STATUS, (v))
/* true when the IOC has posted at least one reply descriptor */
#define mpii_reply_waiting(s) ((mpii_read_intr((s)) & MPII_INTR_STATUS_REPLY)\
    == MPII_INTR_STATUS_REPLY)

/*
 * The reply queue host index registers are written directly with
 * bus_space_write_4 (no barrier), unlike mpii_write().
 */
#define mpii_write_reply_free(s, v) \
    bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
    MPII_REPLY_FREE_HOST_INDEX, (v))
#define mpii_write_reply_post(s, v) \
    bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
    MPII_REPLY_POST_HOST_INDEX, (v))

/* wait for the IOC->system doorbell interrupt to assert */
#define mpii_wait_db_int(s) mpii_wait_ne((s), MPII_INTR_STATUS, \
    MPII_INTR_STATUS_IOC2SYSDB, 0)
/* wait for the system->IOC doorbell to be acknowledged (bit clears) */
#define mpii_wait_db_ack(s) mpii_wait_eq((s), MPII_INTR_STATUS, \
    MPII_INTR_STATUS_SYS2IOCDB, 0)
392
393 static inline void
mpii_dvatosge(struct mpii_sge * sge,u_int64_t dva)394 mpii_dvatosge(struct mpii_sge *sge, u_int64_t dva)
395 {
396 htolem32(&sge->sg_addr_lo, dva);
397 htolem32(&sge->sg_addr_hi, dva >> 32);
398 }
399
/* flags for mpii_req_cfg_header()/mpii_req_cfg_page() */
#define MPII_PG_EXTENDED (1<<0)	/* extended config page (mpii_ecfg_hdr) */
#define MPII_PG_POLL (1<<1)	/* presumably polled completion — used from probe paths */
#define MPII_PG_FMT "\020" "\002POLL" "\001EXTENDED"
403
/*
 * Supported controllers: SAS2xxx (Gen2), SAS3xxx (Gen3) and the
 * SAS38xx/39xx (Aero/Sea) parts, which get MPII_F_AERO at attach.
 */
static const struct pci_matchid mpii_devices[] = {
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2004 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2008 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SSS6200 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_5 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2116_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2116_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_5 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_6 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3004 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3008 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3408 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3416 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3508 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3508_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3516 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3516_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS38XX },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS38XX_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS39XX },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS39XX_1 },
};
439
440 int
mpii_match(struct device * parent,void * match,void * aux)441 mpii_match(struct device *parent, void *match, void *aux)
442 {
443 return (pci_matchbyid(aux, mpii_devices, nitems(mpii_devices)));
444 }
445
446 void
mpii_attach(struct device * parent,struct device * self,void * aux)447 mpii_attach(struct device *parent, struct device *self, void *aux)
448 {
449 struct mpii_softc *sc = (struct mpii_softc *)self;
450 struct pci_attach_args *pa = aux;
451 pcireg_t memtype;
452 int r;
453 pci_intr_handle_t ih;
454 struct scsibus_attach_args saa;
455 struct mpii_ccb *ccb;
456
457 sc->sc_pc = pa->pa_pc;
458 sc->sc_tag = pa->pa_tag;
459 sc->sc_dmat = pa->pa_dmat;
460
461 mtx_init(&sc->sc_req_mtx, IPL_BIO);
462 mtx_init(&sc->sc_rep_mtx, IPL_BIO);
463
464 /* find the appropriate memory base */
465 for (r = PCI_MAPREG_START; r < PCI_MAPREG_END; r += sizeof(memtype)) {
466 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, r);
467 if ((memtype & PCI_MAPREG_TYPE_MASK) == PCI_MAPREG_TYPE_MEM)
468 break;
469 }
470 if (r >= PCI_MAPREG_END) {
471 printf(": unable to locate system interface registers\n");
472 return;
473 }
474
475 if (pci_mapreg_map(pa, r, memtype, 0, &sc->sc_iot, &sc->sc_ioh,
476 NULL, &sc->sc_ios, 0xFF) != 0) {
477 printf(": unable to map system interface registers\n");
478 return;
479 }
480
481 /* disable the expansion rom */
482 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_ROM_REG,
483 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_ROM_REG) &
484 ~PCI_ROM_ENABLE);
485
486 /* disable interrupts */
487 mpii_write(sc, MPII_INTR_MASK,
488 MPII_INTR_MASK_RESET | MPII_INTR_MASK_REPLY |
489 MPII_INTR_MASK_DOORBELL);
490
491 /* hook up the interrupt */
492 if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
493 printf(": unable to map interrupt\n");
494 goto unmap;
495 }
496 printf(": %s\n", pci_intr_string(sc->sc_pc, ih));
497
498 switch (PCI_PRODUCT(pa->pa_id)) {
499 case PCI_PRODUCT_SYMBIOS_SAS38XX:
500 case PCI_PRODUCT_SYMBIOS_SAS38XX_1:
501 case PCI_PRODUCT_SYMBIOS_SAS39XX:
502 case PCI_PRODUCT_SYMBIOS_SAS39XX_1:
503 SET(sc->sc_flags, MPII_F_AERO);
504 break;
505 }
506
507 if (mpii_iocfacts(sc) != 0) {
508 printf("%s: unable to get iocfacts\n", DEVNAME(sc));
509 goto unmap;
510 }
511
512 if (mpii_init(sc) != 0) {
513 printf("%s: unable to initialize ioc\n", DEVNAME(sc));
514 goto unmap;
515 }
516
517 if (mpii_alloc_ccbs(sc) != 0) {
518 /* error already printed */
519 goto unmap;
520 }
521
522 if (mpii_alloc_replies(sc) != 0) {
523 printf("%s: unable to allocated reply space\n", DEVNAME(sc));
524 goto free_ccbs;
525 }
526
527 if (mpii_alloc_queues(sc) != 0) {
528 printf("%s: unable to allocate reply queues\n", DEVNAME(sc));
529 goto free_replies;
530 }
531
532 if (mpii_iocinit(sc) != 0) {
533 printf("%s: unable to send iocinit\n", DEVNAME(sc));
534 goto free_queues;
535 }
536
537 if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
538 MPII_DOORBELL_STATE_OPER) != 0) {
539 printf("%s: state: 0x%08x\n", DEVNAME(sc),
540 mpii_read_db(sc) & MPII_DOORBELL_STATE);
541 printf("%s: operational state timeout\n", DEVNAME(sc));
542 goto free_queues;
543 }
544
545 mpii_push_replies(sc);
546 mpii_init_queues(sc);
547
548 if (mpii_board_info(sc) != 0) {
549 printf("%s: unable to get manufacturing page 0\n",
550 DEVNAME(sc));
551 goto free_queues;
552 }
553
554 if (mpii_portfacts(sc) != 0) {
555 printf("%s: unable to get portfacts\n", DEVNAME(sc));
556 goto free_queues;
557 }
558
559 if (mpii_target_map(sc) != 0) {
560 printf("%s: unable to setup target mappings\n", DEVNAME(sc));
561 goto free_queues;
562 }
563
564 if (mpii_cfg_coalescing(sc) != 0) {
565 printf("%s: unable to configure coalescing\n", DEVNAME(sc));
566 goto free_queues;
567 }
568
569 /* XXX bail on unsupported porttype? */
570 if ((sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_PHYSICAL) ||
571 (sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_VIRTUAL) ||
572 (sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_TRI_MODE)) {
573 if (mpii_eventnotify(sc) != 0) {
574 printf("%s: unable to enable events\n", DEVNAME(sc));
575 goto free_queues;
576 }
577 }
578
579 sc->sc_devs = mallocarray(sc->sc_max_devices,
580 sizeof(struct mpii_device *), M_DEVBUF, M_NOWAIT | M_ZERO);
581 if (sc->sc_devs == NULL) {
582 printf("%s: unable to allocate memory for mpii_device\n",
583 DEVNAME(sc));
584 goto free_queues;
585 }
586
587 if (mpii_portenable(sc) != 0) {
588 printf("%s: unable to enable port\n", DEVNAME(sc));
589 goto free_devs;
590 }
591
592 sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO,
593 mpii_intr, sc, sc->sc_dev.dv_xname);
594 if (sc->sc_ih == NULL)
595 goto free_devs;
596
597 /* force autoconf to wait for the first sas discovery to complete */
598 sc->sc_pending = 1;
599 config_pending_incr();
600
601 saa.saa_adapter = &mpii_switch;
602 saa.saa_adapter_softc = sc;
603 saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
604 saa.saa_adapter_buswidth = sc->sc_max_devices;
605 saa.saa_luns = 1;
606 saa.saa_openings = sc->sc_max_cmds - 1;
607 saa.saa_pool = &sc->sc_iopool;
608 saa.saa_quirks = saa.saa_flags = 0;
609 saa.saa_wwpn = saa.saa_wwnn = 0;
610
611 sc->sc_scsibus = (struct scsibus_softc *) config_found(&sc->sc_dev,
612 &saa, scsiprint);
613
614 /* enable interrupts */
615 mpii_write(sc, MPII_INTR_MASK, MPII_INTR_MASK_DOORBELL
616 | MPII_INTR_MASK_RESET);
617
618 #if NBIO > 0
619 if (ISSET(sc->sc_flags, MPII_F_RAID)) {
620 if (bio_register(&sc->sc_dev, mpii_ioctl) != 0)
621 panic("%s: controller registration failed",
622 DEVNAME(sc));
623 else
624 sc->sc_ioctl = mpii_ioctl;
625
626 #ifndef SMALL_KERNEL
627 if (mpii_create_sensors(sc) != 0)
628 printf("%s: unable to create sensors\n", DEVNAME(sc));
629 #endif
630 }
631 #endif
632
633 return;
634
635 free_devs:
636 free(sc->sc_devs, M_DEVBUF, 0);
637 sc->sc_devs = NULL;
638
639 free_queues:
640 bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_freeq),
641 0, sc->sc_reply_free_qdepth * 4, BUS_DMASYNC_POSTREAD);
642 mpii_dmamem_free(sc, sc->sc_reply_freeq);
643
644 bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
645 0, sc->sc_reply_post_qdepth * 8, BUS_DMASYNC_POSTREAD);
646 mpii_dmamem_free(sc, sc->sc_reply_postq);
647
648 free_replies:
649 bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
650 0, PAGE_SIZE, BUS_DMASYNC_POSTREAD);
651 mpii_dmamem_free(sc, sc->sc_replies);
652
653 free_ccbs:
654 while ((ccb = mpii_get_ccb(sc)) != NULL)
655 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
656 mpii_dmamem_free(sc, sc->sc_requests);
657 free(sc->sc_ccbs, M_DEVBUF, 0);
658
659 unmap:
660 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
661 sc->sc_ios = 0;
662 }
663
/*
 * Detach: tear down the interrupt handler and the register mapping.
 * NOTE(review): ccb/queue DMA allocations are not released here —
 * confirm that is intentional before relying on repeated detach/attach.
 */
int
mpii_detach(struct device *self, int flags)
{
	struct mpii_softc *sc = (struct mpii_softc *)self;

	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}
	if (sc->sc_ios != 0) {
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
		sc->sc_ios = 0;
	}

	return (0);
}
680
/*
 * Interrupt handler: drain the reply post queue under sc_rep_mtx,
 * collecting completed commands and async events on local lists, then
 * run completions and event processing after the mutex is dropped.
 * Returns 1 if any reply was consumed, 0 otherwise.
 */
int
mpii_intr(void *arg)
{
	struct mpii_rcb_list evts = SIMPLEQ_HEAD_INITIALIZER(evts);
	struct mpii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_softc *sc = arg;
	struct mpii_reply_descr *postq = sc->sc_reply_postq_kva, *rdp;
	struct mpii_ccb *ccb;
	struct mpii_rcb *rcb;
	int smid;
	u_int idx;
	int rv = 0;

	mtx_enter(&sc->sc_rep_mtx);
	bus_dmamap_sync(sc->sc_dmat,
	    MPII_DMA_MAP(sc->sc_reply_postq),
	    0, sc->sc_reply_post_qdepth * sizeof(*rdp),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* walk the circular post queue from where we last stopped */
	idx = sc->sc_reply_post_host_index;
	for (;;) {
		rdp = &postq[idx];
		/* an unused descriptor means we have caught up with the IOC */
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		smid = lemtoh16(&rdp->smid);
		rcb = mpii_reply(sc, rdp);

		/* smid != 0 maps to a command ccb; smid 0 is an async event */
		if (smid) {
			ccb = &sc->sc_ccbs[smid - 1];
			ccb->ccb_state = MPII_CCB_READY;
			ccb->ccb_rcb = rcb;
			SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		} else
			SIMPLEQ_INSERT_TAIL(&evts, rcb, rcb_link);

		if (++idx >= sc->sc_reply_post_qdepth)
			idx = 0;

		rv = 1;
	}

	bus_dmamap_sync(sc->sc_dmat,
	    MPII_DMA_MAP(sc->sc_reply_postq),
	    0, sc->sc_reply_post_qdepth * sizeof(*rdp),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* tell the IOC how far we got, only if we consumed anything */
	if (rv)
		mpii_write_reply_post(sc, sc->sc_reply_post_host_index = idx);

	mtx_leave(&sc->sc_rep_mtx);

	if (rv == 0)
		return (0);

	/* run completions and events outside the reply mutex */
	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		ccb->ccb_done(ccb);
	}
	while ((rcb = SIMPLEQ_FIRST(&evts)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&evts, rcb_link);
		mpii_event_process(sc, rcb);
	}

	return (1);
}
755
/*
 * Build the IEEE scatter-gather list (SAS3-style) for a scsi_xfer.
 * SGEs are placed directly after the scsi io request frame; if the
 * transfer needs more SGEs than fit before sc_chain_sge, a chain
 * element is inserted that points at the remainder of the list, which
 * also lives inside the same request frame.
 * Returns 0 on success, 1 if the dmamap load failed.
 */
int
mpii_load_xs_sas3(struct mpii_ccb *ccb)
{
	struct mpii_softc *sc = ccb->ccb_sc;
	struct scsi_xfer *xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io *io = ccb->ccb_cmd;
	struct mpii_ieee_sge *csge, *nsge, *sge;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	int i, error;

	/* Request frame structure is described in the mpii_iocfacts */
	nsge = (struct mpii_ieee_sge *)(io + 1);

	/* zero length transfer still requires an SGE */
	if (xs->datalen == 0) {
		nsge->sg_flags = MPII_IEEE_SGE_END_OF_LIST;
		return (0);
	}

	error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, xs->datalen, NULL,
	    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* only use a chain element if the segments do not fit inline */
	csge = NULL;
	if (dmap->dm_nsegs > sc->sc_chain_sge) {
		csge = nsge + sc->sc_chain_sge;

		/* offset to the chain sge from the beginning */
		io->chain_offset = ((caddr_t)csge - (caddr_t)io) / sizeof(*sge);
	}

	for (i = 0; i < dmap->dm_nsegs; i++, nsge++) {
		if (nsge == csge) {
			/* we have reached the chain slot: skip over it */
			nsge++;

			/* address of the next sge */
			htolem64(&csge->sg_addr, ccb->ccb_cmd_dva +
			    ((caddr_t)nsge - (caddr_t)io));
			htolem32(&csge->sg_len, (dmap->dm_nsegs - i) *
			    sizeof(*sge));
			csge->sg_next_chain_offset = 0;
			csge->sg_flags = MPII_IEEE_SGE_CHAIN_ELEMENT |
			    MPII_IEEE_SGE_ADDR_SYSTEM;

			/* still too many segments: chain again further on */
			if ((dmap->dm_nsegs - i) > sc->sc_max_chain) {
				csge->sg_next_chain_offset = sc->sc_max_chain;
				csge += sc->sc_max_chain;
			}
		}

		sge = nsge;
		sge->sg_flags = MPII_IEEE_SGE_ADDR_SYSTEM;
		sge->sg_next_chain_offset = 0;
		htolem32(&sge->sg_len, dmap->dm_segs[i].ds_len);
		htolem64(&sge->sg_addr, dmap->dm_segs[i].ds_addr);
	}

	/* terminate list */
	sge->sg_flags |= MPII_IEEE_SGE_END_OF_LIST;

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
825
/*
 * Build the classic MPI scatter-gather list (SAS2-style) for a
 * scsi_xfer.  Unlike the SAS3 variant, the chain slot position is
 * fixed at nsge + sc_chain_sge and is only actually used if the loop
 * reaches it.  Returns 0 on success, 1 if the dmamap load failed.
 */
int
mpii_load_xs(struct mpii_ccb *ccb)
{
	struct mpii_softc *sc = ccb->ccb_sc;
	struct scsi_xfer *xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io *io = ccb->ccb_cmd;
	struct mpii_sge *csge, *nsge, *sge;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	u_int32_t flags;
	u_int16_t len;
	int i, error;

	/* Request frame structure is described in the mpii_iocfacts */
	nsge = (struct mpii_sge *)(io + 1);
	csge = nsge + sc->sc_chain_sge;

	/* zero length transfer still requires an SGE */
	if (xs->datalen == 0) {
		nsge->sg_hdr = htole32(MPII_SGE_FL_TYPE_SIMPLE |
		    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL);
		return (0);
	}

	error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, xs->datalen, NULL,
	    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* safe default starting flags */
	flags = MPII_SGE_FL_TYPE_SIMPLE | MPII_SGE_FL_SIZE_64;
	if (xs->flags & SCSI_DATA_OUT)
		flags |= MPII_SGE_FL_DIR_OUT;

	for (i = 0; i < dmap->dm_nsegs; i++, nsge++) {
		if (nsge == csge) {
			/* reached the chain slot: skip it and fill it in */
			nsge++;
			/* offset to the chain sge from the beginning */
			io->chain_offset = ((caddr_t)csge - (caddr_t)io) / 4;
			/* length of the sgl segment we're pointing to */
			len = (dmap->dm_nsegs - i) * sizeof(*sge);
			htolem32(&csge->sg_hdr, MPII_SGE_FL_TYPE_CHAIN |
			    MPII_SGE_FL_SIZE_64 | len);
			/* address of the next sge */
			mpii_dvatosge(csge, ccb->ccb_cmd_dva +
			    ((caddr_t)nsge - (caddr_t)io));
		}

		sge = nsge;
		htolem32(&sge->sg_hdr, flags | dmap->dm_segs[i].ds_len);
		mpii_dvatosge(sge, dmap->dm_segs[i].ds_addr);
	}

	/* terminate list */
	sge->sg_hdr |= htole32(MPII_SGE_FL_LAST | MPII_SGE_FL_EOB |
	    MPII_SGE_FL_EOL);

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
890
/*
 * Per-target probe from the midlayer.  Fills in the link's WWNs from
 * the IOC config pages.  Returns ENXIO for unsupported port types,
 * 1 for targets that should not attach (absent/hidden/unused), and
 * 0 when the target may be used.
 */
int
mpii_scsi_probe(struct scsi_link *link)
{
	struct mpii_softc *sc = link->bus->sb_adapter_softc;
	struct mpii_cfg_sas_dev_pg0 pg0;
	struct mpii_ecfg_hdr ehdr;
	struct mpii_device *dev;
	uint32_t address;
	int flags;

	if ((sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_SAS_PHYSICAL) &&
	    (sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_SAS_VIRTUAL) &&
	    (sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_TRI_MODE))
		return (ENXIO);

	dev = sc->sc_devs[link->target];
	if (dev == NULL)
		return (1);

	flags = dev->flags;
	if (ISSET(flags, MPII_DF_HIDDEN) || ISSET(flags, MPII_DF_UNUSED))
		return (1);

	if (ISSET(flags, MPII_DF_VOLUME)) {
		struct mpii_cfg_hdr hdr;
		struct mpii_cfg_raid_vol_pg1 vpg;
		size_t pagelen;

		address = MPII_CFG_RAID_VOL_ADDR_HANDLE | dev->dev_handle;

		/* polled: probe runs before interrupts are useful here */
		if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL,
		    1, address, MPII_PG_POLL, &hdr) != 0)
			return (EINVAL);

		memset(&vpg, 0, sizeof(vpg));
		/* avoid stack trash on future page growth */
		pagelen = min(sizeof(vpg), hdr.page_length * 4);

		if (mpii_req_cfg_page(sc, address, MPII_PG_POLL, &hdr, 1,
		    &vpg, pagelen) != 0)
			return (EINVAL);

		link->port_wwn = letoh64(vpg.wwid);
		/*
		 * WWIDs generated by LSI firmware are not IEEE NAA compliant
		 * and historical practise in OBP on sparc64 is to set the top
		 * nibble to 3 to indicate that this is a RAID volume.
		 */
		link->port_wwn &= 0x0fffffffffffffff;
		link->port_wwn |= 0x3000000000000000;

		return (0);
	}

	/* plain SAS device: read SAS device page 0 for the addresses */
	memset(&ehdr, 0, sizeof(ehdr));
	ehdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_EXTENDED;
	ehdr.page_number = 0;
	ehdr.page_version = 0;
	ehdr.ext_page_type = MPII_CONFIG_REQ_EXTPAGE_TYPE_SAS_DEVICE;
	ehdr.ext_page_length = htole16(sizeof(pg0) / 4); /* dwords */

	address = MPII_PGAD_SAS_DEVICE_FORM_HANDLE | (uint32_t)dev->dev_handle;
	if (mpii_req_cfg_page(sc, address, MPII_PG_EXTENDED,
	    &ehdr, 1, &pg0, sizeof(pg0)) != 0) {
		printf("%s: unable to fetch SAS device page 0 for target %u\n",
		    DEVNAME(sc), link->target);

		return (0); /* the handle should still work */
	}

	link->port_wwn = letoh64(pg0.sas_addr);
	link->node_wwn = letoh64(pg0.device_name);

	if (ISSET(lemtoh32(&pg0.device_info),
	    MPII_CFG_SAS_DEV_0_DEVINFO_ATAPI_DEVICE)) {
		link->flags |= SDEV_ATAPI;
	}

	return (0);
}
971
972 u_int32_t
mpii_read(struct mpii_softc * sc,bus_size_t r)973 mpii_read(struct mpii_softc *sc, bus_size_t r)
974 {
975 u_int32_t rv;
976 int i;
977
978 if (ISSET(sc->sc_flags, MPII_F_AERO)) {
979 i = 0;
980 do {
981 if (i > 0)
982 DNPRINTF(MPII_D_RW, "%s: mpii_read retry %d\n",
983 DEVNAME(sc), i);
984 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
985 BUS_SPACE_BARRIER_READ);
986 rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
987 i++;
988 } while (rv == 0 && i < 3);
989 } else {
990 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
991 BUS_SPACE_BARRIER_READ);
992 rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
993 }
994
995 DNPRINTF(MPII_D_RW, "%s: mpii_read %#lx %#x\n", DEVNAME(sc), r, rv);
996
997 return (rv);
998 }
999
/*
 * Write a 32bit system interface register, followed by a write
 * barrier so the store is ordered before subsequent register access.
 */
void
mpii_write(struct mpii_softc *sc, bus_size_t r, u_int32_t v)
{
	DNPRINTF(MPII_D_RW, "%s: mpii_write %#lx %#x\n", DEVNAME(sc), r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
1009
1010
1011 int
mpii_wait_eq(struct mpii_softc * sc,bus_size_t r,u_int32_t mask,u_int32_t target)1012 mpii_wait_eq(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
1013 u_int32_t target)
1014 {
1015 int i;
1016
1017 DNPRINTF(MPII_D_RW, "%s: mpii_wait_eq %#lx %#x %#x\n", DEVNAME(sc), r,
1018 mask, target);
1019
1020 for (i = 0; i < 15000; i++) {
1021 if ((mpii_read(sc, r) & mask) == target)
1022 return (0);
1023 delay(1000);
1024 }
1025
1026 return (1);
1027 }
1028
1029 int
mpii_wait_ne(struct mpii_softc * sc,bus_size_t r,u_int32_t mask,u_int32_t target)1030 mpii_wait_ne(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
1031 u_int32_t target)
1032 {
1033 int i;
1034
1035 DNPRINTF(MPII_D_RW, "%s: mpii_wait_ne %#lx %#x %#x\n", DEVNAME(sc), r,
1036 mask, target);
1037
1038 for (i = 0; i < 15000; i++) {
1039 if ((mpii_read(sc, r) & mask) != target)
1040 return (0);
1041 delay(1000);
1042 }
1043
1044 return (1);
1045 }
1046
/*
 * Drive the IOC doorbell state machine until the chip is READY (or
 * already owned/operational), resetting it as needed.  Makes up to
 * five attempts.  Returns 0 on success, 1 on timeout/failure.
 */
int
mpii_init(struct mpii_softc *sc)
{
	u_int32_t db;
	int i;

	/* spin until the ioc leaves the reset state */
	if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
	    MPII_DOORBELL_STATE_RESET) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_init timeout waiting to leave "
		    "reset state\n", DEVNAME(sc));
		return (1);
	}

	/* check current ownership */
	db = mpii_read_db(sc);
	if ((db & MPII_DOORBELL_WHOINIT) == MPII_DOORBELL_WHOINIT_PCIPEER) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_init initialised by pci peer\n",
		    DEVNAME(sc));
		return (0);
	}

	for (i = 0; i < 5; i++) {
		switch (db & MPII_DOORBELL_STATE) {
		case MPII_DOORBELL_STATE_READY:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is ready\n",
			    DEVNAME(sc));
			return (0);

		case MPII_DOORBELL_STATE_OPER:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is oper\n",
			    DEVNAME(sc));
			/* soft reset preserves event replay if supported */
			if (sc->sc_ioc_event_replay)
				mpii_reset_soft(sc);
			else
				mpii_reset_hard(sc);
			break;

		case MPII_DOORBELL_STATE_FAULT:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is being "
			    "reset hard\n" , DEVNAME(sc));
			mpii_reset_hard(sc);
			break;

		case MPII_DOORBELL_STATE_RESET:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init waiting to come "
			    "out of reset\n", DEVNAME(sc));
			if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
			    MPII_DOORBELL_STATE_RESET) != 0)
				return (1);
			break;
		}
		/* re-sample the doorbell for the next iteration */
		db = mpii_read_db(sc);
	}

	return (1);
}
1104
/*
 * Soft-reset the IOC via the IOC_MESSAGE_UNIT_RESET doorbell function.
 * Returns 0 once the IOC reports READY, 1 if the doorbell is busy or a
 * wait times out.
 */
int
mpii_reset_soft(struct mpii_softc *sc)
{
	DNPRINTF(MPII_D_MISC, "%s: mpii_reset_soft\n", DEVNAME(sc));

	/* the doorbell must be idle before a function can be issued */
	if (mpii_read_db(sc) & MPII_DOORBELL_INUSE) {
		return (1);
	}

	mpii_write_db(sc,
	    MPII_DOORBELL_FUNCTION(MPII_FUNCTION_IOC_MESSAGE_UNIT_RESET));

	/* XXX LSI waits 15 sec */
	if (mpii_wait_db_ack(sc) != 0)
		return (1);

	/* XXX LSI waits 15 sec */
	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
	    MPII_DOORBELL_STATE_READY) != 0)
		return (1);

	/* XXX wait for Sys2IOCDB bit to clear in HIS?? */

	return (0);
}
1130
1131 int
mpii_reset_hard(struct mpii_softc * sc)1132 mpii_reset_hard(struct mpii_softc *sc)
1133 {
1134 u_int16_t i;
1135
1136 DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard\n", DEVNAME(sc));
1137
1138 mpii_write_intr(sc, 0);
1139
1140 /* enable diagnostic register */
1141 mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_FLUSH);
1142 mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_1);
1143 mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_2);
1144 mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_3);
1145 mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_4);
1146 mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_5);
1147 mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_6);
1148
1149 delay(100);
1150
1151 if ((mpii_read(sc, MPII_HOSTDIAG) & MPII_HOSTDIAG_DWRE) == 0) {
1152 DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard failure to enable "
1153 "diagnostic read/write\n", DEVNAME(sc));
1154 return(1);
1155 }
1156
1157 /* reset ioc */
1158 mpii_write(sc, MPII_HOSTDIAG, MPII_HOSTDIAG_RESET_ADAPTER);
1159
1160 /* 240 milliseconds */
1161 delay(240000);
1162
1163
1164 /* XXX this whole function should be more robust */
1165
1166 /* XXX read the host diagnostic reg until reset adapter bit clears ? */
1167 for (i = 0; i < 30000; i++) {
1168 if ((mpii_read(sc, MPII_HOSTDIAG) &
1169 MPII_HOSTDIAG_RESET_ADAPTER) == 0)
1170 break;
1171 delay(10000);
1172 }
1173
1174 /* disable diagnostic register */
1175 mpii_write(sc, MPII_WRITESEQ, 0xff);
1176
1177 /* XXX what else? */
1178
1179 DNPRINTF(MPII_D_MISC, "%s: done with mpii_reset_hard\n", DEVNAME(sc));
1180
1181 return(0);
1182 }
1183
/*
 * Send a request to the IOC using the doorbell handshake protocol:
 * announce the function and dword count through the doorbell, wait for
 * the IOC to acknowledge, then feed the request one dword at a time,
 * waiting for an ack after each.  Returns 0 on success, 1 on any
 * busy-doorbell condition or timeout.
 */
int
mpii_handshake_send(struct mpii_softc *sc, void *buf, size_t dwords)
{
	u_int32_t *query = buf;
	int i;

	/* make sure the doorbell is not in use. */
	if (mpii_read_db(sc) & MPII_DOORBELL_INUSE)
		return (1);

	/* clear pending doorbell interrupts */
	if (mpii_read_intr(sc) & MPII_INTR_STATUS_IOC2SYSDB)
		mpii_write_intr(sc, 0);

	/*
	 * first write the doorbell with the handshake function and the
	 * dword count.
	 */
	mpii_write_db(sc, MPII_DOORBELL_FUNCTION(MPII_FUNCTION_HANDSHAKE) |
	    MPII_DOORBELL_DWORDS(dwords));

	/*
	 * the doorbell used bit will be set because a doorbell function has
	 * started. wait for the interrupt and then ack it.
	 */
	if (mpii_wait_db_int(sc) != 0)
		return (1);
	mpii_write_intr(sc, 0);

	/* poll for the acknowledgement. */
	if (mpii_wait_db_ack(sc) != 0)
		return (1);

	/* write the query through the doorbell. */
	for (i = 0; i < dwords; i++) {
		mpii_write_db(sc, htole32(query[i]));
		if (mpii_wait_db_ack(sc) != 0)
			return (1);
	}

	return (0);
}
1226
/*
 * Receive one dword of a doorbell handshake reply.  The doorbell hands
 * back 16 bits per interrupt, so two wait/read/ack cycles fill the
 * dword in memory order, converting each half from little-endian.
 * Returns 0 on success, 1 on timeout.
 */
int
mpii_handshake_recv_dword(struct mpii_softc *sc, u_int32_t *dword)
{
	u_int16_t *words = (u_int16_t *)dword;
	int i;

	for (i = 0; i < 2; i++) {
		if (mpii_wait_db_int(sc) != 0)
			return (1);
		words[i] = letoh16(mpii_read_db(sc) & MPII_DOORBELL_DATA_MASK);
		/* ack the doorbell interrupt before the next half arrives */
		mpii_write_intr(sc, 0);
	}

	return (0);
}
1242
/*
 * Receive a handshake reply of up to 'dwords' dwords into buf.  The
 * actual reply length comes from the message header; any excess beyond
 * the caller's buffer is read and discarded so the doorbell drains
 * completely.  Returns 0 on success, 1 on timeout.
 */
int
mpii_handshake_recv(struct mpii_softc *sc, void *buf, size_t dwords)
{
	struct mpii_msg_reply *reply = buf;
	u_int32_t *dbuf = buf, dummy;
	int i;

	/* get the first dword so we can read the length out of the header. */
	if (mpii_handshake_recv_dword(sc, &dbuf[0]) != 0)
		return (1);

	DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dwords: %lu reply: %d\n",
	    DEVNAME(sc), dwords, reply->msg_length);

	/*
	 * the total length, in dwords, is in the message length field of the
	 * reply header.
	 */
	for (i = 1; i < MIN(dwords, reply->msg_length); i++) {
		if (mpii_handshake_recv_dword(sc, &dbuf[i]) != 0)
			return (1);
	}

	/* if there's extra stuff to come off the ioc, discard it */
	while (i++ < reply->msg_length) {
		if (mpii_handshake_recv_dword(sc, &dummy) != 0)
			return (1);
		DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dummy read: "
		    "0x%08x\n", DEVNAME(sc), dummy);
	}

	/* wait for the doorbell used bit to be reset and clear the intr */
	if (mpii_wait_db_int(sc) != 0)
		return (1);

	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_INUSE, 0) != 0)
		return (1);

	mpii_write_intr(sc, 0);

	return (0);
}
1285
/*
 * No-op ccb completion callback, used when the caller polls/waits on
 * the ccb and handles the reply inline.
 */
void
mpii_empty_done(struct mpii_ccb *ccb)
{
	/* nothing to do */
}
1291
/*
 * Query IOC facts over the doorbell handshake and derive the driver's
 * sizing parameters from them: command credit, reply free/post queue
 * depths, request/reply frame sizes and the scatter-gather limits.
 * Returns 0 on success, 1 on handshake failure or an unusably shallow
 * reply post queue.
 */
int
mpii_iocfacts(struct mpii_softc *sc)
{
	struct mpii_msg_iocfacts_request ifq;
	struct mpii_msg_iocfacts_reply ifp;
	int irs;
	int sge_size;
	u_int qdepth;

	DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts\n", DEVNAME(sc));

	memset(&ifq, 0, sizeof(ifq));
	memset(&ifp, 0, sizeof(ifp));

	ifq.function = MPII_FUNCTION_IOC_FACTS;

	if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	sc->sc_ioc_number = ifp.ioc_number;
	sc->sc_vf_id = ifp.vf_id;

	sc->sc_max_volumes = ifp.max_volumes;
	/* target space covers both physical targets and RAID volumes */
	sc->sc_max_devices = ifp.max_volumes + lemtoh16(&ifp.max_targets);

	if (ISSET(lemtoh32(&ifp.ioc_capabilities),
	    MPII_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
		SET(sc->sc_flags, MPII_F_RAID);
	if (ISSET(lemtoh32(&ifp.ioc_capabilities),
	    MPII_IOCFACTS_CAPABILITY_EVENT_REPLAY))
		sc->sc_ioc_event_replay = 1;

	sc->sc_max_cmds = MIN(lemtoh16(&ifp.request_credit),
	    MPII_REQUEST_CREDIT);

	/* SAS3 and 3.5 controllers have different sgl layouts */
	if (ifp.msg_version_maj == 2 && ((ifp.msg_version_min == 5)
	    || (ifp.msg_version_min == 6)))
		SET(sc->sc_flags, MPII_F_SAS3);

	/*
	 * The host driver must ensure that there is at least one
	 * unused entry in the Reply Free Queue. One way to ensure
	 * that this requirement is met is to never allocate a number
	 * of reply frames that is a multiple of 16.
	 */
	sc->sc_num_reply_frames = sc->sc_max_cmds + 32;
	if (!(sc->sc_num_reply_frames % 16))
		sc->sc_num_reply_frames--;

	/* must be multiple of 16 */
	sc->sc_reply_post_qdepth = sc->sc_max_cmds +
	    sc->sc_num_reply_frames;
	sc->sc_reply_post_qdepth += 16 - (sc->sc_reply_post_qdepth % 16);

	/* clamp to what the controller supports, and rescale commands */
	qdepth = lemtoh16(&ifp.max_reply_descriptor_post_queue_depth);
	if (sc->sc_reply_post_qdepth > qdepth) {
		sc->sc_reply_post_qdepth = qdepth;
		if (sc->sc_reply_post_qdepth < 16) {
			printf("%s: RDPQ is too shallow\n", DEVNAME(sc));
			return (1);
		}
		sc->sc_max_cmds = sc->sc_reply_post_qdepth / 2 - 4;
		sc->sc_num_reply_frames = sc->sc_max_cmds + 4;
	}

	/* round the free queue depth up to a multiple of 16 */
	sc->sc_reply_free_qdepth = sc->sc_num_reply_frames +
	    16 - (sc->sc_num_reply_frames % 16);

	/*
	 * Our request frame for an I/O operation looks like this:
	 *
	 * +-------------------+ -.
	 * | mpii_msg_scsi_io  |  |
	 * +-------------------|  |
	 * | mpii_sge          |  |
	 * + - - - - - - - - - +  |
	 * | ...               |  > ioc_request_frame_size
	 * + - - - - - - - - - +  |
	 * | mpii_sge (tail)   |  |
	 * + - - - - - - - - - +  |
	 * | mpii_sge (csge)   |  | --.
	 * + - - - - - - - - - + -'   | chain sge points to the next sge
	 * | mpii_sge          |<-----'
	 * + - - - - - - - - - +
	 * | ...               |
	 * + - - - - - - - - - +
	 * | mpii_sge (tail)   |
	 * +-------------------+
	 * |                   |
	 * ~~~~~~~~~~~~~~~~~~~~~
	 * |                   |
	 * +-------------------+ <- sc_request_size - sizeof(scsi_sense_data)
	 * | scsi_sense_data   |
	 * +-------------------+
	 *
	 * If the controller gives us a maximum chain size, there can be
	 * multiple chain sges, each of which points to the sge following it.
	 * Otherwise, there will only be one chain sge.
	 */

	/* both sizes are in 32-bit words */
	sc->sc_reply_size = ifp.reply_frame_size * 4;
	irs = lemtoh16(&ifp.ioc_request_frame_size) * 4;
	sc->sc_request_size = MPII_REQUEST_SIZE;
	/* make sure we have enough space for scsi sense data */
	if (irs > sc->sc_request_size) {
		sc->sc_request_size = irs + sizeof(struct scsi_sense_data);
		sc->sc_request_size += 16 - (sc->sc_request_size % 16);
	}

	if (ISSET(sc->sc_flags, MPII_F_SAS3)) {
		sge_size = sizeof(struct mpii_ieee_sge);
	} else {
		sge_size = sizeof(struct mpii_sge);
	}

	/* offset to the chain sge */
	sc->sc_chain_sge = (irs - sizeof(struct mpii_msg_scsi_io)) /
	    sge_size - 1;

	sc->sc_max_chain = lemtoh16(&ifp.ioc_max_chain_seg_size);

	/*
	 * A number of simple scatter-gather elements we can fit into the
	 * request buffer after the I/O command minus the chain element(s).
	 */
	sc->sc_max_sgl = (sc->sc_request_size -
	    sizeof(struct mpii_msg_scsi_io) - sizeof(struct scsi_sense_data)) /
	    sge_size - 1;
	if (sc->sc_max_chain > 0) {
		sc->sc_max_sgl -= (sc->sc_max_sgl - sc->sc_chain_sge) /
		    sc->sc_max_chain;
	}

	return (0);
}
1438
/*
 * Send IOC_INIT over the doorbell handshake, programming the IOC with
 * the frame sizes, queue depths and DMA base addresses chosen earlier
 * (see mpii_iocfacts()).  Returns 0 on success, 1 on handshake failure
 * or a non-success ioc_status/loginfo in the reply.
 */
int
mpii_iocinit(struct mpii_softc *sc)
{
	struct mpii_msg_iocinit_request iiq;
	struct mpii_msg_iocinit_reply iip;

	DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit\n", DEVNAME(sc));

	memset(&iiq, 0, sizeof(iiq));
	memset(&iip, 0, sizeof(iip));

	iiq.function = MPII_FUNCTION_IOC_INIT;
	iiq.whoinit = MPII_WHOINIT_HOST_DRIVER;

	/* XXX JPG do something about vf_id */
	iiq.vf_id = 0;

	iiq.msg_version_maj = 0x02;
	iiq.msg_version_min = 0x00;

	/* XXX JPG ensure compliance with some level and hard-code? */
	iiq.hdr_version_unit = 0x00;
	iiq.hdr_version_dev = 0x00;

	/* frame size is expressed in 32-bit words */
	htolem16(&iiq.system_request_frame_size, sc->sc_request_size / 4);

	htolem16(&iiq.reply_descriptor_post_queue_depth,
	    sc->sc_reply_post_qdepth);

	htolem16(&iiq.reply_free_queue_depth, sc->sc_reply_free_qdepth);

	/* sense buffers live inside the request frames */
	htolem32(&iiq.sense_buffer_address_high,
	    MPII_DMA_DVA(sc->sc_requests) >> 32);

	htolem32(&iiq.system_reply_address_high,
	    MPII_DMA_DVA(sc->sc_replies) >> 32);

	htolem32(&iiq.system_request_frame_base_address_lo,
	    MPII_DMA_DVA(sc->sc_requests));
	htolem32(&iiq.system_request_frame_base_address_hi,
	    MPII_DMA_DVA(sc->sc_requests) >> 32);

	htolem32(&iiq.reply_descriptor_post_queue_address_lo,
	    MPII_DMA_DVA(sc->sc_reply_postq));
	htolem32(&iiq.reply_descriptor_post_queue_address_hi,
	    MPII_DMA_DVA(sc->sc_reply_postq) >> 32);

	htolem32(&iiq.reply_free_queue_address_lo,
	    MPII_DMA_DVA(sc->sc_reply_freeq));
	htolem32(&iiq.reply_free_queue_address_hi,
	    MPII_DMA_DVA(sc->sc_reply_freeq) >> 32);

	if (mpii_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &iip, dwordsof(iip)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	DNPRINTF(MPII_D_MISC, "%s: function: 0x%02x msg_length: %d "
	    "whoinit: 0x%02x\n", DEVNAME(sc), iip.function,
	    iip.msg_length, iip.whoinit);
	DNPRINTF(MPII_D_MISC, "%s: msg_flags: 0x%02x\n", DEVNAME(sc),
	    iip.msg_flags);
	DNPRINTF(MPII_D_MISC, "%s: vf_id: 0x%02x vp_id: 0x%02x\n", DEVNAME(sc),
	    iip.vf_id, iip.vp_id);
	DNPRINTF(MPII_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
	    lemtoh16(&iip.ioc_status));
	DNPRINTF(MPII_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    lemtoh32(&iip.ioc_loginfo));

	if (lemtoh16(&iip.ioc_status) != MPII_IOCSTATUS_SUCCESS ||
	    lemtoh32(&iip.ioc_loginfo))
		return (1);

	return (0);
}
1521
1522 void
mpii_push_reply(struct mpii_softc * sc,struct mpii_rcb * rcb)1523 mpii_push_reply(struct mpii_softc *sc, struct mpii_rcb *rcb)
1524 {
1525 u_int32_t *rfp;
1526 u_int idx;
1527
1528 if (rcb == NULL)
1529 return;
1530
1531 idx = sc->sc_reply_free_host_index;
1532
1533 rfp = MPII_DMA_KVA(sc->sc_reply_freeq);
1534 htolem32(&rfp[idx], rcb->rcb_reply_dva);
1535
1536 if (++idx >= sc->sc_reply_free_qdepth)
1537 idx = 0;
1538
1539 mpii_write_reply_free(sc, sc->sc_reply_free_host_index = idx);
1540 }
1541
/*
 * Issue a PORT_FACTS request for port 0 and record the port type in
 * the softc.  Returns 0 on success, 1 on ccb shortage, poll failure or
 * an empty reply.  The ccb is always released.
 */
int
mpii_portfacts(struct mpii_softc *sc)
{
	struct mpii_msg_portfacts_request *pfq;
	struct mpii_msg_portfacts_reply *pfp;
	struct mpii_ccb *ccb;
	int rv = 1;

	DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts\n", DEVNAME(sc));

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts mpii_get_ccb fail\n",
		    DEVNAME(sc));
		return (rv);
	}

	ccb->ccb_done = mpii_empty_done;
	pfq = ccb->ccb_cmd;

	memset(pfq, 0, sizeof(*pfq));

	pfq->function = MPII_FUNCTION_PORT_FACTS;
	pfq->chain_offset = 0;
	pfq->msg_flags = 0;
	pfq->port_number = 0;
	pfq->vp_id = 0;
	pfq->vf_id = 0;

	if (mpii_poll(sc, ccb) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts poll\n",
		    DEVNAME(sc));
		goto err;
	}

	if (ccb->ccb_rcb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: empty portfacts reply\n",
		    DEVNAME(sc));
		goto err;
	}

	pfp = ccb->ccb_rcb->rcb_reply;
	sc->sc_porttype = pfp->port_type;

	/* hand the reply frame back to the controller */
	mpii_push_reply(sc, ccb->ccb_rcb);
	rv = 0;
err:
	scsi_io_put(&sc->sc_iopool, ccb);

	return (rv);
}
1593
/*
 * iopool handler: dequeue one event reply that requires acknowledgement
 * and send an EVENT_ACK for it with the supplied ccb.  If more replies
 * remain queued, reschedule the handler for another ccb.
 */
void
mpii_eventack(void *cookie, void *io)
{
	struct mpii_softc *sc = cookie;
	struct mpii_ccb *ccb = io;
	struct mpii_rcb *rcb, *next;
	struct mpii_msg_event_reply *enp;
	struct mpii_msg_eventack_request *eaq;

	mtx_enter(&sc->sc_evt_ack_mtx);
	rcb = SIMPLEQ_FIRST(&sc->sc_evt_ack_queue);
	if (rcb != NULL) {
		/* 'next' is only read below when rcb != NULL */
		next = SIMPLEQ_NEXT(rcb, rcb_link);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_ack_queue, rcb_link);
	}
	mtx_leave(&sc->sc_evt_ack_mtx);

	/* queue drained by a concurrent handler; return the unused ccb */
	if (rcb == NULL) {
		scsi_io_put(&sc->sc_iopool, ccb);
		return;
	}

	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;

	ccb->ccb_done = mpii_eventack_done;
	eaq = ccb->ccb_cmd;

	eaq->function = MPII_FUNCTION_EVENT_ACK;

	/* echo the event and its context back, already little-endian */
	eaq->event = enp->event;
	eaq->event_context = enp->event_context;

	mpii_push_reply(sc, rcb);

	mpii_start(sc, ccb);

	if (next != NULL)
		scsi_ioh_add(&sc->sc_evt_ack_handler);
}
1633
/*
 * Completion of an EVENT_ACK request: recycle the reply frame (if any)
 * and release the ccb back to the iopool.
 */
void
mpii_eventack_done(struct mpii_ccb *ccb)
{
	struct mpii_softc *sc = ccb->ccb_sc;

	DNPRINTF(MPII_D_EVT, "%s: event ack done\n", DEVNAME(sc));

	mpii_push_reply(sc, ccb->ccb_rcb);
	scsi_io_put(&sc->sc_iopool, ccb);
}
1644
1645 int
mpii_portenable(struct mpii_softc * sc)1646 mpii_portenable(struct mpii_softc *sc)
1647 {
1648 struct mpii_msg_portenable_request *peq;
1649 struct mpii_ccb *ccb;
1650
1651 DNPRINTF(MPII_D_MISC, "%s: mpii_portenable\n", DEVNAME(sc));
1652
1653 ccb = scsi_io_get(&sc->sc_iopool, 0);
1654 if (ccb == NULL) {
1655 DNPRINTF(MPII_D_MISC, "%s: mpii_portenable ccb_get\n",
1656 DEVNAME(sc));
1657 return (1);
1658 }
1659
1660 ccb->ccb_done = mpii_empty_done;
1661 peq = ccb->ccb_cmd;
1662
1663 peq->function = MPII_FUNCTION_PORT_ENABLE;
1664 peq->vf_id = sc->sc_vf_id;
1665
1666 if (mpii_poll(sc, ccb) != 0) {
1667 DNPRINTF(MPII_D_MISC, "%s: mpii_portenable poll\n",
1668 DEVNAME(sc));
1669 return (1);
1670 }
1671
1672 if (ccb->ccb_rcb == NULL) {
1673 DNPRINTF(MPII_D_MISC, "%s: empty portenable reply\n",
1674 DEVNAME(sc));
1675 return (1);
1676 }
1677
1678 mpii_push_reply(sc, ccb->ccb_rcb);
1679 scsi_io_put(&sc->sc_iopool, ccb);
1680
1681 return (0);
1682 }
1683
1684 int
mpii_cfg_coalescing(struct mpii_softc * sc)1685 mpii_cfg_coalescing(struct mpii_softc *sc)
1686 {
1687 struct mpii_cfg_hdr hdr;
1688 struct mpii_cfg_ioc_pg1 ipg;
1689
1690 hdr.page_version = 0;
1691 hdr.page_length = sizeof(ipg) / 4;
1692 hdr.page_number = 1;
1693 hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC;
1694 memset(&ipg, 0, sizeof(ipg));
1695 if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &ipg,
1696 sizeof(ipg)) != 0) {
1697 DNPRINTF(MPII_D_MISC, "%s: unable to fetch IOC page 1\n"
1698 "page 1\n", DEVNAME(sc));
1699 return (1);
1700 }
1701
1702 if (!ISSET(lemtoh32(&ipg.flags), MPII_CFG_IOC_1_REPLY_COALESCING))
1703 return (0);
1704
1705 /* Disable coalescing */
1706 CLR(ipg.flags, htole32(MPII_CFG_IOC_1_REPLY_COALESCING));
1707 if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 0, &ipg,
1708 sizeof(ipg)) != 0) {
1709 DNPRINTF(MPII_D_MISC, "%s: unable to clear coalescing\n",
1710 DEVNAME(sc));
1711 return (1);
1712 }
1713
1714 return (0);
1715 }
1716
1717 #define MPII_EVENT_MASKALL(enq) do { \
1718 enq->event_masks[0] = 0xffffffff; \
1719 enq->event_masks[1] = 0xffffffff; \
1720 enq->event_masks[2] = 0xffffffff; \
1721 enq->event_masks[3] = 0xffffffff; \
1722 } while (0)
1723
1724 #define MPII_EVENT_UNMASK(enq, evt) do { \
1725 enq->event_masks[evt / 32] &= \
1726 htole32(~(1 << (evt % 32))); \
1727 } while (0)
1728
/*
 * Set up the event machinery (SAS and ack queues, their mutexes, task
 * and io handler) and send an asynchronous EVENT_NOTIFICATION request
 * with the events of interest unmasked.  The reply is handled by
 * mpii_eventnotify_done().  Returns 0 on success, 1 on ccb shortage.
 */
int
mpii_eventnotify(struct mpii_softc *sc)
{
	struct mpii_msg_event_request *enq;
	struct mpii_ccb *ccb;

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_eventnotify ccb_get\n",
		    DEVNAME(sc));
		return (1);
	}

	SIMPLEQ_INIT(&sc->sc_evt_sas_queue);
	mtx_init(&sc->sc_evt_sas_mtx, IPL_BIO);
	task_set(&sc->sc_evt_sas_task, mpii_event_sas, sc);

	SIMPLEQ_INIT(&sc->sc_evt_ack_queue);
	mtx_init(&sc->sc_evt_ack_mtx, IPL_BIO);
	scsi_ioh_set(&sc->sc_evt_ack_handler, &sc->sc_iopool,
	    mpii_eventack, sc);

	ccb->ccb_done = mpii_eventnotify_done;
	enq = ccb->ccb_cmd;

	enq->function = MPII_FUNCTION_EVENT_NOTIFICATION;

	/*
	 * Enable reporting of the following events:
	 *
	 * MPII_EVENT_SAS_DISCOVERY
	 * MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST
	 * MPII_EVENT_SAS_DEVICE_STATUS_CHANGE
	 * MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE
	 * MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST
	 * MPII_EVENT_IR_VOLUME
	 * MPII_EVENT_IR_PHYSICAL_DISK
	 * MPII_EVENT_IR_OPERATION_STATUS
	 */

	MPII_EVENT_MASKALL(enq);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DISCOVERY);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DEVICE_STATUS_CHANGE);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_VOLUME);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_PHYSICAL_DISK);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_OPERATION_STATUS);

	mpii_start(sc, ccb);

	return (0);
}
1783
/*
 * Completion of the EVENT_NOTIFICATION request: free the ccb first
 * (the reply frame outlives it) and hand the event reply off for
 * processing.
 */
void
mpii_eventnotify_done(struct mpii_ccb *ccb)
{
	struct mpii_softc *sc = ccb->ccb_sc;
	struct mpii_rcb *rcb = ccb->ccb_rcb;

	DNPRINTF(MPII_D_EVT, "%s: mpii_eventnotify_done\n", DEVNAME(sc));

	scsi_io_put(&sc->sc_iopool, ccb);
	mpii_event_process(sc, rcb);
}
1795
/*
 * Handle an IR_CONFIGURATION_CHANGE_LIST event: walk the config change
 * elements and create/remove volume devices and flag physical disks
 * that become volume members or hot spares.  Foreign configurations
 * are ignored.
 */
void
mpii_event_raid(struct mpii_softc *sc, struct mpii_msg_event_reply *enp)
{
	struct mpii_evt_ir_cfg_change_list *ccl;
	struct mpii_evt_ir_cfg_element *ce;
	struct mpii_device *dev;
	u_int16_t type;
	int i;

	/* the change list immediately follows the event reply header */
	ccl = (struct mpii_evt_ir_cfg_change_list *)(enp + 1);
	if (ccl->num_elements == 0)
		return;

	if (ISSET(lemtoh32(&ccl->flags), MPII_EVT_IR_CFG_CHANGE_LIST_FOREIGN)) {
		/* bail on foreign configurations */
		return;
	}

	/* elements follow the change list header */
	ce = (struct mpii_evt_ir_cfg_element *)(ccl + 1);

	for (i = 0; i < ccl->num_elements; i++, ce++) {
		type = (lemtoh16(&ce->element_flags) &
		    MPII_EVT_IR_CFG_ELEMENT_TYPE_MASK);

		switch (type) {
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME:
			switch (ce->reason_code) {
			case MPII_EVT_IR_CFG_ELEMENT_RC_ADDED:
			case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_CREATED:
				if (mpii_find_dev(sc,
				    lemtoh16(&ce->vol_dev_handle))) {
					printf("%s: device %#x is already "
					    "configured\n", DEVNAME(sc),
					    lemtoh16(&ce->vol_dev_handle));
					break;
				}
				dev = malloc(sizeof(*dev), M_DEVBUF,
				    M_NOWAIT | M_ZERO);
				if (!dev) {
					printf("%s: failed to allocate a "
					    "device structure\n", DEVNAME(sc));
					break;
				}
				SET(dev->flags, MPII_DF_VOLUME);
				dev->slot = sc->sc_vd_id_low;
				dev->dev_handle = lemtoh16(&ce->vol_dev_handle);
				if (mpii_insert_dev(sc, dev)) {
					free(dev, M_DEVBUF, sizeof *dev);
					break;
				}
				sc->sc_vd_count++;
				break;
			case MPII_EVT_IR_CFG_ELEMENT_RC_REMOVED:
			case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_DELETED:
				if (!(dev = mpii_find_dev(sc,
				    lemtoh16(&ce->vol_dev_handle))))
					break;
				mpii_remove_dev(sc, dev);
				sc->sc_vd_count--;
				break;
			}
			break;
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME_DISK:
			if (ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_PD_CREATED ||
			    ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) {
				/* there should be an underlying sas drive */
				if (!(dev = mpii_find_dev(sc,
				    lemtoh16(&ce->phys_disk_dev_handle))))
					break;
				/* promoted from a hot spare? */
				CLR(dev->flags, MPII_DF_HOT_SPARE);
				SET(dev->flags, MPII_DF_VOLUME_DISK |
				    MPII_DF_HIDDEN);
			}
			break;
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_HOT_SPARE:
			if (ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) {
				/* there should be an underlying sas drive */
				if (!(dev = mpii_find_dev(sc,
				    lemtoh16(&ce->phys_disk_dev_handle))))
					break;
				SET(dev->flags, MPII_DF_HOT_SPARE |
				    MPII_DF_HIDDEN);
			}
			break;
		}
	}
}
1887
/*
 * Task handler for queued SAS events.  Processes exactly one reply off
 * sc_evt_sas_queue per invocation, re-adding the task if more remain.
 * SAS_DISCOVERY events go to mpii_event_discovery(); topology change
 * lists attach/detach devices per phy entry.
 */
void
mpii_event_sas(void *xsc)
{
	struct mpii_softc *sc = xsc;
	struct mpii_rcb *rcb, *next;
	struct mpii_msg_event_reply *enp;
	struct mpii_evt_sas_tcl *tcl;
	struct mpii_evt_phy_entry *pe;
	struct mpii_device *dev;
	int i;
	u_int16_t handle;

	mtx_enter(&sc->sc_evt_sas_mtx);
	rcb = SIMPLEQ_FIRST(&sc->sc_evt_sas_queue);
	if (rcb != NULL) {
		/* 'next' is only examined below when rcb != NULL */
		next = SIMPLEQ_NEXT(rcb, rcb_link);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_sas_queue, rcb_link);
	}
	mtx_leave(&sc->sc_evt_sas_mtx);

	if (rcb == NULL)
		return;
	if (next != NULL)
		task_add(systq, &sc->sc_evt_sas_task);

	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;
	switch (lemtoh16(&enp->event)) {
	case MPII_EVENT_SAS_DISCOVERY:
		mpii_event_discovery(sc, enp);
		goto done;
	case MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		/* handle below */
		break;
	default:
		/* only the two events above are ever queued here */
		panic("%s: unexpected event %#x in sas event queue",
		    DEVNAME(sc), lemtoh16(&enp->event));
		/* NOTREACHED */
	}

	/* the topology change list and its phy entries follow the header */
	tcl = (struct mpii_evt_sas_tcl *)(enp + 1);
	pe = (struct mpii_evt_phy_entry *)(tcl + 1);

	for (i = 0; i < tcl->num_entries; i++, pe++) {
		switch (pe->phy_status & MPII_EVENT_SAS_TOPO_PS_RC_MASK) {
		case MPII_EVENT_SAS_TOPO_PS_RC_ADDED:
			handle = lemtoh16(&pe->dev_handle);
			if (mpii_find_dev(sc, handle)) {
				printf("%s: device %#x is already "
				    "configured\n", DEVNAME(sc), handle);
				break;
			}

			dev = malloc(sizeof(*dev), M_DEVBUF, M_WAITOK | M_ZERO);
			dev->slot = sc->sc_pd_id_start + tcl->start_phy_num + i;
			dev->dev_handle = handle;
			dev->phy_num = tcl->start_phy_num + i;
			if (tcl->enclosure_handle)
				dev->physical_port = tcl->physical_port;
			dev->enclosure = lemtoh16(&tcl->enclosure_handle);
			dev->expander = lemtoh16(&tcl->expander_handle);

			if (mpii_insert_dev(sc, dev)) {
				free(dev, M_DEVBUF, sizeof *dev);
				break;
			}

			if (sc->sc_scsibus != NULL)
				scsi_probe_target(sc->sc_scsibus, dev->slot);
			break;

		case MPII_EVENT_SAS_TOPO_PS_RC_MISSING:
			dev = mpii_find_dev(sc, lemtoh16(&pe->dev_handle));
			if (dev == NULL)
				break;

			mpii_remove_dev(sc, dev);
			mpii_sas_remove_device(sc, dev->dev_handle);
			if (sc->sc_scsibus != NULL &&
			    !ISSET(dev->flags, MPII_DF_HIDDEN)) {
				scsi_activate(sc->sc_scsibus, dev->slot, -1,
				    DVACT_DEACTIVATE);
				scsi_detach_target(sc->sc_scsibus, dev->slot,
				    DETACH_FORCE);
			}

			free(dev, M_DEVBUF, sizeof *dev);
			break;
		}
	}

done:
	mpii_event_done(sc, rcb);
}
1981
/*
 * Handle a SAS_DISCOVERY event during the initial attach window.
 * sc_pending counts outstanding discovery passes (presumably primed
 * elsewhere during attach — TODO confirm against the attach path);
 * STARTED bumps it, COMPLETED drops it, and when the count would reach
 * its initial bias the pending config is released.  Once sc_pending is
 * zero the event is of no further interest.
 */
void
mpii_event_discovery(struct mpii_softc *sc, struct mpii_msg_event_reply *enp)
{
	struct mpii_evt_sas_discovery *esd =
	    (struct mpii_evt_sas_discovery *)(enp + 1);

	if (sc->sc_pending == 0)
		return;

	switch (esd->reason_code) {
	case MPII_EVENT_SAS_DISC_REASON_CODE_STARTED:
		++sc->sc_pending;
		break;
	case MPII_EVENT_SAS_DISC_REASON_CODE_COMPLETED:
		if (--sc->sc_pending == 1) {
			sc->sc_pending = 0;
			config_pending_decr();
		}
		break;
	}
}
2003
/*
 * Dispatch an event reply.  SAS events are queued for the systq task
 * (which takes ownership of the rcb and returns early here); all other
 * events are handled inline and the rcb is passed to mpii_event_done()
 * at the end.
 */
void
mpii_event_process(struct mpii_softc *sc, struct mpii_rcb *rcb)
{
	struct mpii_msg_event_reply *enp;

	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;

	DNPRINTF(MPII_D_EVT, "%s: mpii_event_process: %#x\n", DEVNAME(sc),
	    lemtoh16(&enp->event));

	switch (lemtoh16(&enp->event)) {
	case MPII_EVENT_EVENT_CHANGE:
		/* should be properly ignored */
		break;
	case MPII_EVENT_SAS_DISCOVERY:
	case MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		/* defer to the sas task; it will call mpii_event_done() */
		mtx_enter(&sc->sc_evt_sas_mtx);
		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_sas_queue, rcb, rcb_link);
		mtx_leave(&sc->sc_evt_sas_mtx);
		task_add(systq, &sc->sc_evt_sas_task);
		return;
	case MPII_EVENT_SAS_DEVICE_STATUS_CHANGE:
		break;
	case MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		break;
	case MPII_EVENT_IR_VOLUME: {
		struct mpii_evt_ir_volume *evd =
		    (struct mpii_evt_ir_volume *)(enp + 1);
		struct mpii_device *dev;
#if NBIO > 0
		const char *vol_states[] = {
			BIOC_SVINVALID_S,
			BIOC_SVOFFLINE_S,
			BIOC_SVBUILDING_S,
			BIOC_SVONLINE_S,
			BIOC_SVDEGRADED_S,
			BIOC_SVONLINE_S,
		};
#endif

		/* during autoconf the volume isn't attached yet */
		if (cold)
			break;
		KERNEL_LOCK();
		dev = mpii_find_dev(sc, lemtoh16(&evd->vol_dev_handle));
		KERNEL_UNLOCK();
		if (dev == NULL)
			break;
#if NBIO > 0
		if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATE_CHANGED)
			printf("%s: volume %d state changed from %s to %s\n",
			    DEVNAME(sc), dev->slot - sc->sc_vd_id_low,
			    vol_states[evd->prev_value],
			    vol_states[evd->new_value]);
#endif
		if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATUS_CHANGED &&
		    ISSET(evd->new_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC) &&
		    !ISSET(evd->prev_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC))
			printf("%s: started resync on a volume %d\n",
			    DEVNAME(sc), dev->slot - sc->sc_vd_id_low);
	}
		break;
	case MPII_EVENT_IR_PHYSICAL_DISK:
		break;
	case MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		mpii_event_raid(sc, enp);
		break;
	case MPII_EVENT_IR_OPERATION_STATUS: {
		struct mpii_evt_ir_status *evs =
		    (struct mpii_evt_ir_status *)(enp + 1);
		struct mpii_device *dev;

		KERNEL_LOCK();
		dev = mpii_find_dev(sc, lemtoh16(&evs->vol_dev_handle));
		KERNEL_UNLOCK();
		if (dev != NULL &&
		    evs->operation == MPII_EVENT_IR_RAIDOP_RESYNC)
			/* record resync progress for bio(4) reporting */
			dev->percent = evs->percent;
		break;
	}
	default:
		DNPRINTF(MPII_D_EVT, "%s: unhandled event 0x%02x\n",
		    DEVNAME(sc), lemtoh16(&enp->event));
	}

	mpii_event_done(sc, rcb);
}
2090
/*
 * Finish with an event reply: if the IOC asked for an acknowledgement,
 * queue the rcb for the ack handler (which will recycle it); otherwise
 * return the reply frame to the free queue immediately.
 */
void
mpii_event_done(struct mpii_softc *sc, struct mpii_rcb *rcb)
{
	struct mpii_msg_event_reply *enp = rcb->rcb_reply;

	if (enp->ack_required) {
		mtx_enter(&sc->sc_evt_ack_mtx);
		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_ack_queue, rcb, rcb_link);
		mtx_leave(&sc->sc_evt_ack_mtx);
		scsi_ioh_add(&sc->sc_evt_ack_handler);
	} else
		mpii_push_reply(sc, rcb);
}
2104
/*
 * Remove a SAS device from the IOC: first issue a target reset for the
 * handle, then a SAS_IO_UNIT_CONTROL REMOVE_DEVICE operation, reusing
 * the same ccb for both requests.  Best-effort: a ccb shortage is
 * silently ignored.
 */
void
mpii_sas_remove_device(struct mpii_softc *sc, u_int16_t handle)
{
	struct mpii_msg_scsi_task_request *stq;
	struct mpii_msg_sas_oper_request *soq;
	struct mpii_ccb *ccb;

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL)
		return;

	stq = ccb->ccb_cmd;
	stq->function = MPII_FUNCTION_SCSI_TASK_MGMT;
	stq->task_type = MPII_SCSI_TASK_TARGET_RESET;
	htolem16(&stq->dev_handle, handle);

	ccb->ccb_done = mpii_empty_done;
	mpii_wait(sc, ccb);

	if (ccb->ccb_rcb != NULL)
		mpii_push_reply(sc, ccb->ccb_rcb);

	/* reuse a ccb */
	ccb->ccb_state = MPII_CCB_READY;
	ccb->ccb_rcb = NULL;

	soq = ccb->ccb_cmd;
	memset(soq, 0, sizeof(*soq));
	soq->function = MPII_FUNCTION_SAS_IO_UNIT_CONTROL;
	soq->operation = MPII_SAS_OP_REMOVE_DEVICE;
	htolem16(&soq->dev_handle, handle);

	ccb->ccb_done = mpii_empty_done;
	mpii_wait(sc, ccb);
	if (ccb->ccb_rcb != NULL)
		mpii_push_reply(sc, ccb->ccb_rcb);

	scsi_io_put(&sc->sc_iopool, ccb);
}
2144
/*
 * Fetch IOC facts via the doorbell handshake and manufacturing page 0
 * via the config interface, then print the board name, firmware and MPI
 * versions at attach time.  Returns 0 on success, non-zero on failure.
 *
 * NOTE(review): error returns are inconsistent (1 for handshake failures
 * vs. EINVAL for the config page) — callers appear to only test for
 * non-zero, so this is cosmetic; confirm before normalizing.
 */
int
mpii_board_info(struct mpii_softc *sc)
{
	struct mpii_msg_iocfacts_request ifq;
	struct mpii_msg_iocfacts_reply ifp;
	struct mpii_cfg_manufacturing_pg0 mpg;
	struct mpii_cfg_hdr hdr;

	memset(&ifq, 0, sizeof(ifq));
	memset(&ifp, 0, sizeof(ifp));

	ifq.function = MPII_FUNCTION_IOC_FACTS;

	if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: failed to request ioc facts\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: failed to receive ioc facts\n",
		    DEVNAME(sc));
		return (1);
	}

	hdr.page_version = 0;
	hdr.page_length = sizeof(mpg) / 4;	/* page lengths are in dwords */
	hdr.page_number = 0;
	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_MANUFACTURING;
	memset(&mpg, 0, sizeof(mpg));
	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &mpg,
	    sizeof(mpg)) != 0) {
		printf("%s: unable to fetch manufacturing page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	printf("%s: %s, firmware %u.%u.%u.%u%s, MPI %u.%u\n", DEVNAME(sc),
	    mpg.board_name, ifp.fw_version_maj, ifp.fw_version_min,
	    ifp.fw_version_unit, ifp.fw_version_dev,
	    ISSET(sc->sc_flags, MPII_F_RAID) ? " IR" : "",
	    ifp.msg_version_maj, ifp.msg_version_min);

	return (0);
}
2190
/*
 * Read IOC page 8 to work out how the controller maps RAID volumes and
 * physical disks onto SCSI target ids, and adjust sc_vd_id_low and
 * sc_pd_id_start accordingly.  Returns 0 on success, EINVAL if the page
 * cannot be fetched.
 */
int
mpii_target_map(struct mpii_softc *sc)
{
	struct mpii_cfg_hdr hdr;
	struct mpii_cfg_ioc_pg8 ipg;
	int flags, pad = 0;

	hdr.page_version = 0;
	hdr.page_length = sizeof(ipg) / 4;	/* page lengths are in dwords */
	hdr.page_number = 8;
	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC;
	memset(&ipg, 0, sizeof(ipg));
	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &ipg,
	    sizeof(ipg)) != 0) {
		printf("%s: unable to fetch ioc page 8\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* target id 0 may be reserved by the firmware */
	if (lemtoh16(&ipg.flags) & MPII_IOC_PG8_FLAGS_RESERVED_TARGETID_0)
		pad = 1;

	flags = lemtoh16(&ipg.ir_volume_mapping_flags) &
	    MPII_IOC_PG8_IRFLAGS_VOLUME_MAPPING_MODE_MASK;
	if (ISSET(sc->sc_flags, MPII_F_RAID)) {
		if (flags == MPII_IOC_PG8_IRFLAGS_LOW_VOLUME_MAPPING) {
			/* volumes occupy the low target ids ... */
			sc->sc_vd_id_low += pad;
			pad = sc->sc_max_volumes; /* for sc_pd_id_start */
		} else
			/* ... or the high end of the target id range */
			sc->sc_vd_id_low = sc->sc_max_devices -
			    sc->sc_max_volumes;
	}

	sc->sc_pd_id_start += pad;

	return (0);
}
2228
2229 int
mpii_req_cfg_header(struct mpii_softc * sc,u_int8_t type,u_int8_t number,u_int32_t address,int flags,void * p)2230 mpii_req_cfg_header(struct mpii_softc *sc, u_int8_t type, u_int8_t number,
2231 u_int32_t address, int flags, void *p)
2232 {
2233 struct mpii_msg_config_request *cq;
2234 struct mpii_msg_config_reply *cp;
2235 struct mpii_ccb *ccb;
2236 struct mpii_cfg_hdr *hdr = p;
2237 struct mpii_ecfg_hdr *ehdr = p;
2238 int etype = 0;
2239 int rv = 0;
2240
2241 DNPRINTF(MPII_D_MISC, "%s: mpii_req_cfg_header type: %#x number: %x "
2242 "address: 0x%08x flags: 0x%b\n", DEVNAME(sc), type, number,
2243 address, flags, MPII_PG_FMT);
2244
2245 ccb = scsi_io_get(&sc->sc_iopool,
2246 ISSET(flags, MPII_PG_POLL) ? SCSI_NOSLEEP : 0);
2247 if (ccb == NULL) {
2248 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header ccb_get\n",
2249 DEVNAME(sc));
2250 return (1);
2251 }
2252
2253 if (ISSET(flags, MPII_PG_EXTENDED)) {
2254 etype = type;
2255 type = MPII_CONFIG_REQ_PAGE_TYPE_EXTENDED;
2256 }
2257
2258 cq = ccb->ccb_cmd;
2259
2260 cq->function = MPII_FUNCTION_CONFIG;
2261
2262 cq->action = MPII_CONFIG_REQ_ACTION_PAGE_HEADER;
2263
2264 cq->config_header.page_number = number;
2265 cq->config_header.page_type = type;
2266 cq->ext_page_type = etype;
2267 htolem32(&cq->page_address, address);
2268 htolem32(&cq->page_buffer.sg_hdr, MPII_SGE_FL_TYPE_SIMPLE |
2269 MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL);
2270
2271 ccb->ccb_done = mpii_empty_done;
2272 if (ISSET(flags, MPII_PG_POLL)) {
2273 if (mpii_poll(sc, ccb) != 0) {
2274 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n",
2275 DEVNAME(sc));
2276 return (1);
2277 }
2278 } else
2279 mpii_wait(sc, ccb);
2280
2281 if (ccb->ccb_rcb == NULL) {
2282 scsi_io_put(&sc->sc_iopool, ccb);
2283 return (1);
2284 }
2285 cp = ccb->ccb_rcb->rcb_reply;
2286
2287 DNPRINTF(MPII_D_MISC, "%s: action: 0x%02x sgl_flags: 0x%02x "
2288 "msg_length: %d function: 0x%02x\n", DEVNAME(sc), cp->action,
2289 cp->sgl_flags, cp->msg_length, cp->function);
2290 DNPRINTF(MPII_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x "
2291 "msg_flags: 0x%02x\n", DEVNAME(sc),
2292 lemtoh16(&cp->ext_page_length), cp->ext_page_type,
2293 cp->msg_flags);
2294 DNPRINTF(MPII_D_MISC, "%s: vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
2295 cp->vp_id, cp->vf_id);
2296 DNPRINTF(MPII_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
2297 lemtoh16(&cp->ioc_status));
2298 DNPRINTF(MPII_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2299 lemtoh32(&cp->ioc_loginfo));
2300 DNPRINTF(MPII_D_MISC, "%s: page_version: 0x%02x page_length: %d "
2301 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2302 cp->config_header.page_version,
2303 cp->config_header.page_length,
2304 cp->config_header.page_number,
2305 cp->config_header.page_type);
2306
2307 if (lemtoh16(&cp->ioc_status) != MPII_IOCSTATUS_SUCCESS)
2308 rv = 1;
2309 else if (ISSET(flags, MPII_PG_EXTENDED)) {
2310 memset(ehdr, 0, sizeof(*ehdr));
2311 ehdr->page_version = cp->config_header.page_version;
2312 ehdr->page_number = cp->config_header.page_number;
2313 ehdr->page_type = cp->config_header.page_type;
2314 ehdr->ext_page_length = cp->ext_page_length;
2315 ehdr->ext_page_type = cp->ext_page_type;
2316 } else
2317 *hdr = cp->config_header;
2318
2319 mpii_push_reply(sc, ccb->ccb_rcb);
2320 scsi_io_put(&sc->sc_iopool, ccb);
2321
2322 return (rv);
2323 }
2324
2325 int
mpii_req_cfg_page(struct mpii_softc * sc,u_int32_t address,int flags,void * p,int read,void * page,size_t len)2326 mpii_req_cfg_page(struct mpii_softc *sc, u_int32_t address, int flags,
2327 void *p, int read, void *page, size_t len)
2328 {
2329 struct mpii_msg_config_request *cq;
2330 struct mpii_msg_config_reply *cp;
2331 struct mpii_ccb *ccb;
2332 struct mpii_cfg_hdr *hdr = p;
2333 struct mpii_ecfg_hdr *ehdr = p;
2334 caddr_t kva;
2335 int page_length;
2336 int rv = 0;
2337
2338 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page address: %d read: %d "
2339 "type: %x\n", DEVNAME(sc), address, read, hdr->page_type);
2340
2341 page_length = ISSET(flags, MPII_PG_EXTENDED) ?
2342 lemtoh16(&ehdr->ext_page_length) : hdr->page_length;
2343
2344 if (len > sc->sc_request_size - sizeof(*cq) || len < page_length * 4)
2345 return (1);
2346
2347 ccb = scsi_io_get(&sc->sc_iopool,
2348 ISSET(flags, MPII_PG_POLL) ? SCSI_NOSLEEP : 0);
2349 if (ccb == NULL) {
2350 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page ccb_get\n",
2351 DEVNAME(sc));
2352 return (1);
2353 }
2354
2355 cq = ccb->ccb_cmd;
2356
2357 cq->function = MPII_FUNCTION_CONFIG;
2358
2359 cq->action = (read ? MPII_CONFIG_REQ_ACTION_PAGE_READ_CURRENT :
2360 MPII_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT);
2361
2362 if (ISSET(flags, MPII_PG_EXTENDED)) {
2363 cq->config_header.page_version = ehdr->page_version;
2364 cq->config_header.page_number = ehdr->page_number;
2365 cq->config_header.page_type = ehdr->page_type;
2366 cq->ext_page_len = ehdr->ext_page_length;
2367 cq->ext_page_type = ehdr->ext_page_type;
2368 } else
2369 cq->config_header = *hdr;
2370 cq->config_header.page_type &= MPII_CONFIG_REQ_PAGE_TYPE_MASK;
2371 htolem32(&cq->page_address, address);
2372 htolem32(&cq->page_buffer.sg_hdr, MPII_SGE_FL_TYPE_SIMPLE |
2373 MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL |
2374 MPII_SGE_FL_SIZE_64 | (page_length * 4) |
2375 (read ? MPII_SGE_FL_DIR_IN : MPII_SGE_FL_DIR_OUT));
2376
2377 /* bounce the page via the request space to avoid more bus_dma games */
2378 mpii_dvatosge(&cq->page_buffer, ccb->ccb_cmd_dva +
2379 sizeof(struct mpii_msg_config_request));
2380
2381 kva = ccb->ccb_cmd;
2382 kva += sizeof(struct mpii_msg_config_request);
2383
2384 if (!read)
2385 memcpy(kva, page, len);
2386
2387 ccb->ccb_done = mpii_empty_done;
2388 if (ISSET(flags, MPII_PG_POLL)) {
2389 if (mpii_poll(sc, ccb) != 0) {
2390 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n",
2391 DEVNAME(sc));
2392 return (1);
2393 }
2394 } else
2395 mpii_wait(sc, ccb);
2396
2397 if (ccb->ccb_rcb == NULL) {
2398 scsi_io_put(&sc->sc_iopool, ccb);
2399 return (1);
2400 }
2401 cp = ccb->ccb_rcb->rcb_reply;
2402
2403 DNPRINTF(MPII_D_MISC, "%s: action: 0x%02x msg_length: %d "
2404 "function: 0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length,
2405 cp->function);
2406 DNPRINTF(MPII_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x "
2407 "msg_flags: 0x%02x\n", DEVNAME(sc),
2408 lemtoh16(&cp->ext_page_length), cp->ext_page_type,
2409 cp->msg_flags);
2410 DNPRINTF(MPII_D_MISC, "%s: vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
2411 cp->vp_id, cp->vf_id);
2412 DNPRINTF(MPII_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
2413 lemtoh16(&cp->ioc_status));
2414 DNPRINTF(MPII_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2415 lemtoh32(&cp->ioc_loginfo));
2416 DNPRINTF(MPII_D_MISC, "%s: page_version: 0x%02x page_length: %d "
2417 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2418 cp->config_header.page_version,
2419 cp->config_header.page_length,
2420 cp->config_header.page_number,
2421 cp->config_header.page_type);
2422
2423 if (lemtoh16(&cp->ioc_status) != MPII_IOCSTATUS_SUCCESS)
2424 rv = 1;
2425 else if (read)
2426 memcpy(page, kva, len);
2427
2428 mpii_push_reply(sc, ccb->ccb_rcb);
2429 scsi_io_put(&sc->sc_iopool, ccb);
2430
2431 return (rv);
2432 }
2433
/*
 * Translate a posted reply descriptor into the rcb that tracks the
 * corresponding reply frame.  Address replies carry the DMA address of
 * the frame; the frame index is recovered by subtracting the base dva
 * and dividing by the frame size.  The descriptor slot is reset to all
 * ones (the "unused" pattern) so it can be recognized as free again.
 * Returns NULL for descriptor types that carry no reply frame.
 */
struct mpii_rcb *
mpii_reply(struct mpii_softc *sc, struct mpii_reply_descr *rdp)
{
	struct mpii_rcb *rcb = NULL;
	u_int32_t rfid;

	DNPRINTF(MPII_D_INTR, "%s: mpii_reply\n", DEVNAME(sc));

	if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
	    MPII_REPLY_DESCR_ADDRESS_REPLY) {
		rfid = (lemtoh32(&rdp->frame_addr) -
		    (u_int32_t)MPII_DMA_DVA(sc->sc_replies)) /
		    sc->sc_reply_size;

		bus_dmamap_sync(sc->sc_dmat,
		    MPII_DMA_MAP(sc->sc_replies), sc->sc_reply_size * rfid,
		    sc->sc_reply_size, BUS_DMASYNC_POSTREAD);

		rcb = &sc->sc_rcbs[rfid];
	}

	/* mark the descriptor slot as unused again */
	memset(rdp, 0xff, sizeof(*rdp));

	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
	    8 * sc->sc_reply_post_host_index, 8,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	return (rcb);
}
2463
/*
 * Allocate a single contiguous chunk of DMA-able memory of the given
 * size, wrapped in a struct mpii_dmamem that carries the map, segment
 * and kva.  The goto chain unwinds in the exact reverse order of the
 * bus_dma calls.  Returns NULL on any failure.
 */
struct mpii_dmamem *
mpii_dmamem_alloc(struct mpii_softc *sc, size_t size)
{
	struct mpii_dmamem *mdm;
	int nsegs;

	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mdm == NULL)
		return (NULL);

	mdm->mdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg,
	    1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
	    &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
	    NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (mdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
mdmfree:
	free(mdm, M_DEVBUF, sizeof *mdm);

	return (NULL);
}
2505
/*
 * Release a chunk allocated by mpii_dmamem_alloc().  The teardown is
 * the exact reverse of the allocation sequence: unload, unmap, free the
 * segment, destroy the map, then free the wrapper.
 */
void
mpii_dmamem_free(struct mpii_softc *sc, struct mpii_dmamem *mdm)
{
	DNPRINTF(MPII_D_MEM, "%s: mpii_dmamem_free %p\n", DEVNAME(sc), mdm);

	bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
	free(mdm, M_DEVBUF, sizeof *mdm);
}
2517
2518 int
mpii_insert_dev(struct mpii_softc * sc,struct mpii_device * dev)2519 mpii_insert_dev(struct mpii_softc *sc, struct mpii_device *dev)
2520 {
2521 int slot; /* initial hint */
2522
2523 if (dev == NULL || dev->slot < 0)
2524 return (1);
2525 slot = dev->slot;
2526
2527 while (slot < sc->sc_max_devices && sc->sc_devs[slot] != NULL)
2528 slot++;
2529
2530 if (slot >= sc->sc_max_devices)
2531 return (1);
2532
2533 dev->slot = slot;
2534 sc->sc_devs[slot] = dev;
2535
2536 return (0);
2537 }
2538
2539 int
mpii_remove_dev(struct mpii_softc * sc,struct mpii_device * dev)2540 mpii_remove_dev(struct mpii_softc *sc, struct mpii_device *dev)
2541 {
2542 int i;
2543
2544 if (dev == NULL)
2545 return (1);
2546
2547 for (i = 0; i < sc->sc_max_devices; i++) {
2548 if (sc->sc_devs[i] == NULL)
2549 continue;
2550
2551 if (sc->sc_devs[i]->dev_handle == dev->dev_handle) {
2552 sc->sc_devs[i] = NULL;
2553 return (0);
2554 }
2555 }
2556
2557 return (1);
2558 }
2559
2560 struct mpii_device *
mpii_find_dev(struct mpii_softc * sc,u_int16_t handle)2561 mpii_find_dev(struct mpii_softc *sc, u_int16_t handle)
2562 {
2563 int i;
2564
2565 for (i = 0; i < sc->sc_max_devices; i++) {
2566 if (sc->sc_devs[i] == NULL)
2567 continue;
2568
2569 if (sc->sc_devs[i]->dev_handle == handle)
2570 return (sc->sc_devs[i]);
2571 }
2572
2573 return (NULL);
2574 }
2575
/*
 * Allocate the ccb array and the DMA memory backing the request frames,
 * wire each ccb to its frame (kva and dva), and seed the free list.
 * System message id (smid) zero is reserved by the firmware, so frame 0
 * is never used and only (sc_max_cmds - 1) ccbs exist.  Returns 0 on
 * success, 1 on failure with everything unwound.
 */
int
mpii_alloc_ccbs(struct mpii_softc *sc)
{
	struct mpii_ccb *ccb;
	u_int8_t *cmd;
	int i;

	SIMPLEQ_INIT(&sc->sc_ccb_free);
	SIMPLEQ_INIT(&sc->sc_ccb_tmos);
	mtx_init(&sc->sc_ccb_free_mtx, IPL_BIO);
	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
	scsi_ioh_set(&sc->sc_ccb_tmo_handler, &sc->sc_iopool,
	    mpii_scsi_cmd_tmo_handler, sc);

	sc->sc_ccbs = mallocarray((sc->sc_max_cmds-1), sizeof(*ccb),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->sc_ccbs == NULL) {
		printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
		return (1);
	}

	sc->sc_requests = mpii_dmamem_alloc(sc,
	    sc->sc_request_size * sc->sc_max_cmds);
	if (sc->sc_requests == NULL) {
		printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
		goto free_ccbs;
	}
	cmd = MPII_DMA_KVA(sc->sc_requests);

	/*
	 * we have sc->sc_max_cmds system request message
	 * frames, but smid zero cannot be used. so we then
	 * have (sc->sc_max_cmds - 1) number of ccbs
	 */
	for (i = 1; i < sc->sc_max_cmds; i++) {
		ccb = &sc->sc_ccbs[i - 1];

		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, sc->sc_max_sgl,
		    MAXPHYS, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
		    &ccb->ccb_dmamap) != 0) {
			printf("%s: unable to create dma map\n", DEVNAME(sc));
			goto free_maps;
		}

		ccb->ccb_sc = sc;
		htolem16(&ccb->ccb_smid, i);
		ccb->ccb_offset = sc->sc_request_size * i;

		/* kva and dva of this ccb's request frame */
		ccb->ccb_cmd = &cmd[ccb->ccb_offset];
		ccb->ccb_cmd_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_requests) +
		    ccb->ccb_offset;

		DNPRINTF(MPII_D_CCB, "%s: mpii_alloc_ccbs(%d) ccb: %p map: %p "
		    "sc: %p smid: %#x offs: %#lx cmd: %p dva: %#lx\n",
		    DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc,
		    ccb->ccb_smid, ccb->ccb_offset, ccb->ccb_cmd,
		    ccb->ccb_cmd_dva);

		mpii_put_ccb(sc, ccb);
	}

	scsi_iopool_init(&sc->sc_iopool, sc, mpii_get_ccb, mpii_put_ccb);

	return (0);

free_maps:
	/* the free list holds exactly the ccbs whose maps were created */
	while ((ccb = mpii_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);

	mpii_dmamem_free(sc, sc->sc_requests);
free_ccbs:
	free(sc->sc_ccbs, M_DEVBUF, (sc->sc_max_cmds-1) * sizeof(*ccb));

	return (1);
}
2652
/*
 * scsi_iopool put callback: reset a ccb and return it to the free list.
 * The request frame is zeroed here so callers always start from a clean
 * frame.  The kernel lock is dropped around the mutex to avoid holding
 * both; the iopool may call this with the kernel lock held.
 */
void
mpii_put_ccb(void *cookie, void *io)
{
	struct mpii_softc *sc = cookie;
	struct mpii_ccb *ccb = io;

	DNPRINTF(MPII_D_CCB, "%s: mpii_put_ccb %p\n", DEVNAME(sc), ccb);

	ccb->ccb_state = MPII_CCB_FREE;
	ccb->ccb_cookie = NULL;
	ccb->ccb_done = NULL;
	ccb->ccb_rcb = NULL;
	memset(ccb->ccb_cmd, 0, sc->sc_request_size);

	KERNEL_UNLOCK();
	mtx_enter(&sc->sc_ccb_free_mtx);
	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link);
	mtx_leave(&sc->sc_ccb_free_mtx);
	KERNEL_LOCK();
}
2673
/*
 * scsi_iopool get callback: take a ccb off the free list, or return
 * NULL if none are available.  As in mpii_put_ccb(), the kernel lock is
 * dropped around the free-list mutex.
 */
void *
mpii_get_ccb(void *cookie)
{
	struct mpii_softc *sc = cookie;
	struct mpii_ccb *ccb;

	KERNEL_UNLOCK();

	mtx_enter(&sc->sc_ccb_free_mtx);
	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_free);
	if (ccb != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link);
		ccb->ccb_state = MPII_CCB_READY;
	}
	mtx_leave(&sc->sc_ccb_free_mtx);

	KERNEL_LOCK();

	DNPRINTF(MPII_D_CCB, "%s: mpii_get_ccb %p\n", DEVNAME(sc), ccb);

	return (ccb);
}
2696
2697 int
mpii_alloc_replies(struct mpii_softc * sc)2698 mpii_alloc_replies(struct mpii_softc *sc)
2699 {
2700 DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_replies\n", DEVNAME(sc));
2701
2702 sc->sc_rcbs = mallocarray(sc->sc_num_reply_frames,
2703 sizeof(struct mpii_rcb), M_DEVBUF, M_NOWAIT);
2704 if (sc->sc_rcbs == NULL)
2705 return (1);
2706
2707 sc->sc_replies = mpii_dmamem_alloc(sc, sc->sc_reply_size *
2708 sc->sc_num_reply_frames);
2709 if (sc->sc_replies == NULL) {
2710 free(sc->sc_rcbs, M_DEVBUF,
2711 sc->sc_num_reply_frames * sizeof(struct mpii_rcb));
2712 return (1);
2713 }
2714
2715 return (0);
2716 }
2717
/*
 * Wire each rcb to its reply frame (kva and dva) and hand every frame
 * to the hardware via the reply free queue.  Called once after the
 * reply memory has been allocated.
 */
void
mpii_push_replies(struct mpii_softc *sc)
{
	struct mpii_rcb *rcb;
	caddr_t kva = MPII_DMA_KVA(sc->sc_replies);
	int i;

	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
	    0, sc->sc_reply_size * sc->sc_num_reply_frames,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < sc->sc_num_reply_frames; i++) {
		rcb = &sc->sc_rcbs[i];

		rcb->rcb_reply = kva + sc->sc_reply_size * i;
		rcb->rcb_reply_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) +
		    sc->sc_reply_size * i;
		mpii_push_reply(sc, rcb);
	}
}
2738
/*
 * Post a prepared ccb to the hardware.  A request descriptor naming the
 * ccb's smid is built and written to the request descriptor post
 * registers.  On LP64 the 64-bit descriptor is written atomically; on
 * 32-bit platforms the two halves are written under sc_req_mtx so
 * concurrent posts cannot interleave.
 */
void
mpii_start(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	struct mpii_request_header *rhp;
	struct mpii_request_descr descr;
	u_long *rdp = (u_long *)&descr;

	DNPRINTF(MPII_D_RW, "%s: mpii_start %#lx\n", DEVNAME(sc),
	    ccb->ccb_cmd_dva);

	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, sc->sc_request_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ccb->ccb_state = MPII_CCB_QUEUED;

	rhp = ccb->ccb_cmd;

	memset(&descr, 0, sizeof(descr));

	/* descriptor flavour depends on the request function */
	switch (rhp->function) {
	case MPII_FUNCTION_SCSI_IO_REQUEST:
		descr.request_flags = MPII_REQ_DESCR_SCSI_IO;
		descr.dev_handle = htole16(ccb->ccb_dev_handle);
		break;
	case MPII_FUNCTION_SCSI_TASK_MGMT:
		descr.request_flags = MPII_REQ_DESCR_HIGH_PRIORITY;
		break;
	default:
		descr.request_flags = MPII_REQ_DESCR_DEFAULT;
	}

	descr.vf_id = sc->sc_vf_id;
	descr.smid = ccb->ccb_smid;

	DNPRINTF(MPII_D_RW, "%s: MPII_REQ_DESCR_POST_LOW (0x%08x) write "
	    "0x%08lx\n", DEVNAME(sc), MPII_REQ_DESCR_POST_LOW, *rdp);

	DNPRINTF(MPII_D_RW, "%s: MPII_REQ_DESCR_POST_HIGH (0x%08x) write "
	    "0x%08lx\n", DEVNAME(sc), MPII_REQ_DESCR_POST_HIGH, *(rdp+1));

#if defined(__LP64__)
	/* a single 64-bit write posts the whole descriptor */
	bus_space_write_raw_8(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, *rdp);
#else
	/* two 32-bit writes must not interleave with another post */
	mtx_enter(&sc->sc_req_mtx);
	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, rdp[0]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, 8, BUS_SPACE_BARRIER_WRITE);

	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_HIGH, rdp[1]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, 8, BUS_SPACE_BARRIER_WRITE);
	mtx_leave(&sc->sc_req_mtx);
#endif
}
2797
/*
 * Run a ccb to completion by polling, for use before interrupts are
 * enabled.  The ccb's done hook and cookie are temporarily replaced so
 * mpii_poll_done() can flip the local rv flag; the original done hook
 * is invoked once the command completes.  Always returns 0.
 */
int
mpii_poll(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	void (*done)(struct mpii_ccb *);
	void *cookie;
	int rv = 1;

	DNPRINTF(MPII_D_INTR, "%s: mpii_poll\n", DEVNAME(sc));

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mpii_poll_done;
	ccb->ccb_cookie = &rv;

	mpii_start(sc, ccb);

	/* rv is cleared by mpii_poll_done() when the ccb completes */
	while (rv == 1) {
		/* avoid excessive polling */
		if (mpii_reply_waiting(sc))
			mpii_intr(sc);
		else
			delay(10);
	}

	ccb->ccb_cookie = cookie;
	done(ccb);

	return (0);
}
2828
/*
 * Completion hook used by mpii_poll(): clear the poll loop's flag,
 * which the cookie points at.
 */
void
mpii_poll_done(struct mpii_ccb *ccb)
{
	int *rv = ccb->ccb_cookie;

	*rv = 0;
}
2836
/*
 * Allocate the reply free queue and the reply post queue.  The free
 * queue is pre-populated with the dva of every reply frame; the post
 * queue is filled with 0xff, the pattern mpii_reply() uses to mark
 * unused descriptor slots.  Returns 0 on success, 1 on failure.
 */
int
mpii_alloc_queues(struct mpii_softc *sc)
{
	u_int32_t *rfp;
	int i;

	DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_queues\n", DEVNAME(sc));

	sc->sc_reply_freeq = mpii_dmamem_alloc(sc,
	    sc->sc_reply_free_qdepth * sizeof(*rfp));
	if (sc->sc_reply_freeq == NULL)
		return (1);
	rfp = MPII_DMA_KVA(sc->sc_reply_freeq);
	for (i = 0; i < sc->sc_num_reply_frames; i++) {
		rfp[i] = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) +
		    sc->sc_reply_size * i;
	}

	sc->sc_reply_postq = mpii_dmamem_alloc(sc,
	    sc->sc_reply_post_qdepth * sizeof(struct mpii_reply_descr));
	if (sc->sc_reply_postq == NULL)
		goto free_reply_freeq;
	sc->sc_reply_postq_kva = MPII_DMA_KVA(sc->sc_reply_postq);
	memset(sc->sc_reply_postq_kva, 0xff, sc->sc_reply_post_qdepth *
	    sizeof(struct mpii_reply_descr));

	return (0);

free_reply_freeq:
	mpii_dmamem_free(sc, sc->sc_reply_freeq);
	return (1);
}
2869
/*
 * Program the initial queue indices into the hardware: the free queue
 * index starts at the last slot, the post queue index at zero.
 */
void
mpii_init_queues(struct mpii_softc *sc)
{
	DNPRINTF(MPII_D_MISC, "%s: mpii_init_queues\n", DEVNAME(sc));

	sc->sc_reply_free_host_index = sc->sc_reply_free_qdepth - 1;
	sc->sc_reply_post_host_index = 0;
	mpii_write_reply_free(sc, sc->sc_reply_free_host_index);
	mpii_write_reply_post(sc, sc->sc_reply_post_host_index);
}
2880
/*
 * Run a ccb to completion, sleeping until it finishes.  The ccb's done
 * hook and cookie are temporarily swapped for mpii_wait_done() and a
 * local mutex; mpii_wait_done() clears the cookie under that mutex and
 * wakes us.  The original done hook is invoked afterwards.
 */
void
mpii_wait(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	struct mutex mtx;
	void (*done)(struct mpii_ccb *);
	void *cookie;

	mtx_init(&mtx, IPL_BIO);

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mpii_wait_done;
	ccb->ccb_cookie = &mtx;

	/* XXX this will wait forever for the ccb to complete */

	mpii_start(sc, ccb);

	mtx_enter(&mtx);
	/* NULL cookie is the completion signal from mpii_wait_done() */
	while (ccb->ccb_cookie != NULL)
		msleep_nsec(ccb, &mtx, PRIBIO, "mpiiwait", INFSLP);
	mtx_leave(&mtx);

	ccb->ccb_cookie = cookie;
	done(ccb);
}
2908
/*
 * Completion hook used by mpii_wait(): clear the cookie under the
 * waiter's mutex (so the sleep condition is re-checked safely), then
 * wake the single waiter sleeping on the ccb.
 */
void
mpii_wait_done(struct mpii_ccb *ccb)
{
	struct mutex *mtx = ccb->ccb_cookie;

	mtx_enter(mtx);
	ccb->ccb_cookie = NULL;
	mtx_leave(mtx);

	wakeup_one(ccb);
}
2920
/*
 * SCSI midlayer command entry point.  Builds a SCSI IO request in the
 * ccb's frame, loads the data transfer SGL, and either polls (for
 * SCSI_POLL transfers) or posts the command with a timeout armed.
 * Completion is handled in mpii_scsi_cmd_done().  Entered with the
 * kernel lock held; it is dropped for the submission path.
 */
void
mpii_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mpii_softc *sc = link->bus->sb_adapter_softc;
	struct mpii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io;
	struct mpii_device *dev;
	int ret;

	DNPRINTF(MPII_D_CMD, "%s: mpii_scsi_cmd\n", DEVNAME(sc));

	if (xs->cmdlen > MPII_CDB_LEN) {
		/* fail oversized CDBs with ILLEGAL REQUEST sense */
		DNPRINTF(MPII_D_CMD, "%s: CDB too big %d\n",
		    DEVNAME(sc), xs->cmdlen);
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.error_code = SSD_ERRCODE_VALID | 0x70;
		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.add_sense_code = 0x20;
		xs->error = XS_SENSE;
		scsi_done(xs);
		return;
	}

	if ((dev = sc->sc_devs[link->target]) == NULL) {
		/* device no longer exists */
		xs->error = XS_SELTIMEOUT;
		scsi_done(xs);
		return;
	}

	KERNEL_UNLOCK();

	DNPRINTF(MPII_D_CMD, "%s: ccb_smid: %d xs->flags: 0x%x\n",
	    DEVNAME(sc), ccb->ccb_smid, xs->flags);

	ccb->ccb_cookie = xs;
	ccb->ccb_done = mpii_scsi_cmd_done;
	ccb->ccb_dev_handle = dev->dev_handle;

	io = ccb->ccb_cmd;
	memset(io, 0, sizeof(*io));
	io->function = MPII_FUNCTION_SCSI_IO_REQUEST;
	io->sense_buffer_length = sizeof(xs->sense);
	io->sgl_offset0 = sizeof(struct mpii_msg_scsi_io) / 4;
	htolem16(&io->io_flags, xs->cmdlen);
	htolem16(&io->dev_handle, ccb->ccb_dev_handle);
	htobem16(&io->lun[0], link->lun);

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}

	io->tagging = MPII_SCSIIO_ATTR_SIMPLE_Q;

	memcpy(io->cdb, &xs->cmd, xs->cmdlen);

	htolem32(&io->data_length, xs->datalen);

	/* sense data is at the end of a request */
	htolem32(&io->sense_buffer_low_address, ccb->ccb_cmd_dva +
	    sc->sc_request_size - sizeof(struct scsi_sense_data));

	if (ISSET(sc->sc_flags, MPII_F_SAS3))
		ret = mpii_load_xs_sas3(ccb);
	else
		ret = mpii_load_xs(ccb);

	if (ret != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		goto done;
	}

	timeout_set(&xs->stimeout, mpii_scsi_cmd_tmo, ccb);
	if (xs->flags & SCSI_POLL) {
		if (mpii_poll(sc, ccb) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			goto done;
		}
	} else {
		timeout_add_msec(&xs->stimeout, xs->timeout);
		mpii_start(sc, ccb);
	}

	KERNEL_LOCK();
	return;

done:
	KERNEL_LOCK();
	scsi_done(xs);
}
3020
/*
 * Per-command timeout handler (timeout context).  If the ccb is still
 * queued, move it to the timeout list and schedule the timeout handler
 * to issue a target reset from process context.
 */
void
mpii_scsi_cmd_tmo(void *xccb)
{
	struct mpii_ccb *ccb = xccb;
	struct mpii_softc *sc = ccb->ccb_sc;

	printf("%s: mpii_scsi_cmd_tmo (0x%08x)\n", DEVNAME(sc),
	    mpii_read_db(sc));

	mtx_enter(&sc->sc_ccb_mtx);
	if (ccb->ccb_state == MPII_CCB_QUEUED) {
		ccb->ccb_state = MPII_CCB_TIMEOUT;
		SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_tmos, ccb, ccb_link);
	}
	mtx_leave(&sc->sc_ccb_mtx);

	scsi_ioh_add(&sc->sc_ccb_tmo_handler);
}
3039
/*
 * Timeout work handler: take one timed-out ccb off the list and issue a
 * target reset for its device handle using tccb (a ccb supplied by the
 * iopool).  mpii_scsi_cmd_tmo_done() re-enters this function with the
 * same tccb so queued timeouts are drained one at a time; when the list
 * is empty tccb is returned to the pool.
 */
void
mpii_scsi_cmd_tmo_handler(void *cookie, void *io)
{
	struct mpii_softc *sc = cookie;
	struct mpii_ccb *tccb = io;
	struct mpii_ccb *ccb;
	struct mpii_msg_scsi_task_request *stq;

	mtx_enter(&sc->sc_ccb_mtx);
	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_tmos);
	if (ccb != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_tmos, ccb_link);
		ccb->ccb_state = MPII_CCB_QUEUED;
	}
	/* should remove any other ccbs for the same dev handle */
	mtx_leave(&sc->sc_ccb_mtx);

	if (ccb == NULL) {
		scsi_io_put(&sc->sc_iopool, tccb);
		return;
	}

	stq = tccb->ccb_cmd;
	stq->function = MPII_FUNCTION_SCSI_TASK_MGMT;
	stq->task_type = MPII_SCSI_TASK_TARGET_RESET;
	htolem16(&stq->dev_handle, ccb->ccb_dev_handle);

	tccb->ccb_done = mpii_scsi_cmd_tmo_done;
	mpii_start(sc, tccb);
}
3070
/*
 * Target reset completion: loop back into the timeout handler with the
 * same tccb to process any further timed-out commands.
 */
void
mpii_scsi_cmd_tmo_done(struct mpii_ccb *tccb)
{
	mpii_scsi_cmd_tmo_handler(tccb->ccb_sc, tccb);
}
3076
/*
 * SCSI IO completion.  Unlinks the ccb from the timeout list if it
 * timed out, syncs and unloads the data DMA map, then translates the
 * firmware's ioc_status and SCSI status into xs->error/status/resid and
 * copies out autosense data.  A NULL rcb means the firmware completed
 * the command successfully without posting a reply frame.
 */
void
mpii_scsi_cmd_done(struct mpii_ccb *ccb)
{
	struct mpii_ccb *tccb;
	struct mpii_msg_scsi_io_error *sie;
	struct mpii_softc *sc = ccb->ccb_sc;
	struct scsi_xfer *xs = ccb->ccb_cookie;
	struct scsi_sense_data *sense;
	bus_dmamap_t dmap = ccb->ccb_dmamap;

	timeout_del(&xs->stimeout);
	mtx_enter(&sc->sc_ccb_mtx);
	if (ccb->ccb_state == MPII_CCB_TIMEOUT) {
		/* ENOSIMPLEQ_REMOVE :( */
		if (ccb == SIMPLEQ_FIRST(&sc->sc_ccb_tmos))
			SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_tmos, ccb_link);
		else {
			SIMPLEQ_FOREACH(tccb, &sc->sc_ccb_tmos, ccb_link) {
				if (SIMPLEQ_NEXT(tccb, ccb_link) == ccb) {
					SIMPLEQ_REMOVE_AFTER(&sc->sc_ccb_tmos,
					    tccb, ccb_link);
					break;
				}
			}
		}
	}

	ccb->ccb_state = MPII_CCB_READY;
	mtx_leave(&sc->sc_ccb_mtx);

	if (xs->datalen != 0) {
		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, dmap);
	}

	xs->error = XS_NOERROR;
	xs->resid = 0;

	if (ccb->ccb_rcb == NULL) {
		/* no scsi error, we're ok so drop out early */
		xs->status = SCSI_OK;
		goto done;
	}

	sie = ccb->ccb_rcb->rcb_reply;

	DNPRINTF(MPII_D_CMD, "%s: mpii_scsi_cmd_done xs cmd: 0x%02x len: %d "
	    "flags 0x%x\n", DEVNAME(sc), xs->cmd.opcode, xs->datalen,
	    xs->flags);
	DNPRINTF(MPII_D_CMD, "%s: dev_handle: %d msg_length: %d "
	    "function: 0x%02x\n", DEVNAME(sc), lemtoh16(&sie->dev_handle),
	    sie->msg_length, sie->function);
	DNPRINTF(MPII_D_CMD, "%s: vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
	    sie->vp_id, sie->vf_id);
	DNPRINTF(MPII_D_CMD, "%s: scsi_status: 0x%02x scsi_state: 0x%02x "
	    "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status,
	    sie->scsi_state, lemtoh16(&sie->ioc_status));
	DNPRINTF(MPII_D_CMD, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    lemtoh32(&sie->ioc_loginfo));
	DNPRINTF(MPII_D_CMD, "%s: transfer_count: %d\n", DEVNAME(sc),
	    lemtoh32(&sie->transfer_count));
	DNPRINTF(MPII_D_CMD, "%s: sense_count: %d\n", DEVNAME(sc),
	    lemtoh32(&sie->sense_count));
	DNPRINTF(MPII_D_CMD, "%s: response_info: 0x%08x\n", DEVNAME(sc),
	    lemtoh32(&sie->response_info));
	DNPRINTF(MPII_D_CMD, "%s: task_tag: 0x%04x\n", DEVNAME(sc),
	    lemtoh16(&sie->task_tag));
	DNPRINTF(MPII_D_CMD, "%s: bidirectional_transfer_count: 0x%08x\n",
	    DEVNAME(sc), lemtoh32(&sie->bidirectional_transfer_count));

	if (sie->scsi_state & MPII_SCSIIO_STATE_NO_SCSI_STATUS)
		xs->status = SCSI_TERMINATED;
	else
		xs->status = sie->scsi_status;
	xs->resid = 0;

	/* map the firmware completion code onto midlayer error codes */
	switch (lemtoh16(&sie->ioc_status) & MPII_IOCSTATUS_MASK) {
	case MPII_IOCSTATUS_SCSI_DATA_UNDERRUN:
		xs->resid = xs->datalen - lemtoh32(&sie->transfer_count);
		/* FALLTHROUGH */

	case MPII_IOCSTATUS_SUCCESS:
	case MPII_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			xs->error = XS_NOERROR;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			xs->error = XS_DRIVER_STUFFUP;
		}
		break;

	case MPII_IOCSTATUS_BUSY:
	case MPII_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_BUSY;
		break;

	case MPII_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPII_IOCSTATUS_SCSI_TASK_TERMINATED:
		xs->error = XS_RESET;
		break;

	case MPII_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPII_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	/* autosense data lives at the tail of the request frame */
	sense = (struct scsi_sense_data *)((caddr_t)ccb->ccb_cmd +
	    sc->sc_request_size - sizeof(*sense));
	if (sie->scsi_state & MPII_SCSIIO_STATE_AUTOSENSE_VALID)
		memcpy(&xs->sense, sense, sizeof(xs->sense));

	DNPRINTF(MPII_D_CMD, "%s: xs err: %d status: %#x\n", DEVNAME(sc),
	    xs->error, xs->status);

	mpii_push_reply(sc, ccb->ccb_rcb);
done:
	KERNEL_LOCK();
	scsi_done(xs);
	KERNEL_UNLOCK();
}
3216
3217 int
mpii_scsi_ioctl(struct scsi_link * link,u_long cmd,caddr_t addr,int flag)3218 mpii_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
3219 {
3220 struct mpii_softc *sc = link->bus->sb_adapter_softc;
3221 struct mpii_device *dev = sc->sc_devs[link->target];
3222
3223 DNPRINTF(MPII_D_IOCTL, "%s: mpii_scsi_ioctl\n", DEVNAME(sc));
3224
3225 switch (cmd) {
3226 case DIOCGCACHE:
3227 case DIOCSCACHE:
3228 if (dev != NULL && ISSET(dev->flags, MPII_DF_VOLUME)) {
3229 return (mpii_ioctl_cache(link, cmd,
3230 (struct dk_cache *)addr));
3231 }
3232 break;
3233
3234 default:
3235 if (sc->sc_ioctl)
3236 return (sc->sc_ioctl(&sc->sc_dev, cmd, addr));
3237
3238 break;
3239 }
3240
3241 return (ENOTTY);
3242 }
3243
3244 int
mpii_ioctl_cache(struct scsi_link * link,u_long cmd,struct dk_cache * dc)3245 mpii_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc)
3246 {
3247 struct mpii_softc *sc = link->bus->sb_adapter_softc;
3248 struct mpii_device *dev = sc->sc_devs[link->target];
3249 struct mpii_cfg_raid_vol_pg0 *vpg;
3250 struct mpii_msg_raid_action_request *req;
3251 struct mpii_msg_raid_action_reply *rep;
3252 struct mpii_cfg_hdr hdr;
3253 struct mpii_ccb *ccb;
3254 u_int32_t addr = MPII_CFG_RAID_VOL_ADDR_HANDLE | dev->dev_handle;
3255 size_t pagelen;
3256 int rv = 0;
3257 int enabled;
3258
3259 if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3260 addr, MPII_PG_POLL, &hdr) != 0)
3261 return (EINVAL);
3262
3263 pagelen = hdr.page_length * 4;
3264 vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
3265 if (vpg == NULL)
3266 return (ENOMEM);
3267
3268 if (mpii_req_cfg_page(sc, addr, MPII_PG_POLL, &hdr, 1,
3269 vpg, pagelen) != 0) {
3270 rv = EINVAL;
3271 goto done;
3272 }
3273
3274 enabled = ((lemtoh16(&vpg->volume_settings) &
3275 MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_MASK) ==
3276 MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_ENABLED) ? 1 : 0;
3277
3278 if (cmd == DIOCGCACHE) {
3279 dc->wrcache = enabled;
3280 dc->rdcache = 0;
3281 goto done;
3282 } /* else DIOCSCACHE */
3283
3284 if (dc->rdcache) {
3285 rv = EOPNOTSUPP;
3286 goto done;
3287 }
3288
3289 if (((dc->wrcache) ? 1 : 0) == enabled)
3290 goto done;
3291
3292 ccb = scsi_io_get(&sc->sc_iopool, SCSI_POLL);
3293 if (ccb == NULL) {
3294 rv = ENOMEM;
3295 goto done;
3296 }
3297
3298 ccb->ccb_done = mpii_empty_done;
3299
3300 req = ccb->ccb_cmd;
3301 memset(req, 0, sizeof(*req));
3302 req->function = MPII_FUNCTION_RAID_ACTION;
3303 req->action = MPII_RAID_ACTION_CHANGE_VOL_WRITE_CACHE;
3304 htolem16(&req->vol_dev_handle, dev->dev_handle);
3305 htolem32(&req->action_data, dc->wrcache ?
3306 MPII_RAID_VOL_WRITE_CACHE_ENABLE :
3307 MPII_RAID_VOL_WRITE_CACHE_DISABLE);
3308
3309 if (mpii_poll(sc, ccb) != 0) {
3310 rv = EIO;
3311 goto done;
3312 }
3313
3314 if (ccb->ccb_rcb != NULL) {
3315 rep = ccb->ccb_rcb->rcb_reply;
3316 if ((rep->ioc_status != MPII_IOCSTATUS_SUCCESS) ||
3317 ((rep->action_data[0] &
3318 MPII_RAID_VOL_WRITE_CACHE_MASK) !=
3319 (dc->wrcache ? MPII_RAID_VOL_WRITE_CACHE_ENABLE :
3320 MPII_RAID_VOL_WRITE_CACHE_DISABLE)))
3321 rv = EINVAL;
3322 mpii_push_reply(sc, ccb->ccb_rcb);
3323 }
3324
3325 scsi_io_put(&sc->sc_iopool, ccb);
3326
3327 done:
3328 free(vpg, M_TEMP, pagelen);
3329 return (rv);
3330 }
3331
3332 #if NBIO > 0
3333 int
mpii_ioctl(struct device * dev,u_long cmd,caddr_t addr)3334 mpii_ioctl(struct device *dev, u_long cmd, caddr_t addr)
3335 {
3336 struct mpii_softc *sc = (struct mpii_softc *)dev;
3337 int error = 0;
3338
3339 DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl ", DEVNAME(sc));
3340
3341 switch (cmd) {
3342 case BIOCINQ:
3343 DNPRINTF(MPII_D_IOCTL, "inq\n");
3344 error = mpii_ioctl_inq(sc, (struct bioc_inq *)addr);
3345 break;
3346 case BIOCVOL:
3347 DNPRINTF(MPII_D_IOCTL, "vol\n");
3348 error = mpii_ioctl_vol(sc, (struct bioc_vol *)addr);
3349 break;
3350 case BIOCDISK:
3351 DNPRINTF(MPII_D_IOCTL, "disk\n");
3352 error = mpii_ioctl_disk(sc, (struct bioc_disk *)addr);
3353 break;
3354 default:
3355 DNPRINTF(MPII_D_IOCTL, " invalid ioctl\n");
3356 error = ENOTTY;
3357 }
3358
3359 return (error);
3360 }
3361
3362 int
mpii_ioctl_inq(struct mpii_softc * sc,struct bioc_inq * bi)3363 mpii_ioctl_inq(struct mpii_softc *sc, struct bioc_inq *bi)
3364 {
3365 int i;
3366
3367 DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_inq\n", DEVNAME(sc));
3368
3369 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
3370 for (i = 0; i < sc->sc_max_devices; i++)
3371 if (sc->sc_devs[i] &&
3372 ISSET(sc->sc_devs[i]->flags, MPII_DF_VOLUME))
3373 bi->bi_novol++;
3374 return (0);
3375 }
3376
/*
 * BIOCVOL: report status, RAID level, number of disks (members plus
 * hot spares), capacity and the attached scsi device name for the
 * volume selected by bv->bv_volid.
 */
int
mpii_ioctl_vol(struct mpii_softc *sc, struct bioc_vol *bv)
{
	struct mpii_cfg_raid_vol_pg0 *vpg;
	struct mpii_cfg_hdr hdr;
	struct mpii_device *dev;
	struct scsi_link *lnk;
	struct device *scdev;
	size_t pagelen;
	u_int16_t volh;
	int rv, hcnt = 0;

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_vol %d\n",
	    DEVNAME(sc), bv->bv_volid);

	/* translate the bio volume id into the firmware device handle */
	if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL)
		return (ENODEV);
	volh = dev->dev_handle;

	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
		printf("%s: unable to fetch header for raid volume page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* page_length is in 32-bit words */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (vpg == NULL) {
		printf("%s: unable to allocate space for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
	    &hdr, 1, vpg, pagelen) != 0) {
		printf("%s: unable to fetch raid volume page 0\n",
		    DEVNAME(sc));
		free(vpg, M_TEMP, pagelen);
		return (EINVAL);
	}

	/* map the firmware volume state onto a bio(4) status */
	switch (vpg->volume_state) {
	case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
	case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
		/* a degraded volume being resynced counts as rebuilding */
		if (ISSET(lemtoh32(&vpg->volume_status),
		    MPII_CFG_RAID_VOL_0_STATUS_RESYNC)) {
			bv->bv_status = BIOC_SVREBUILD;
			bv->bv_percent = dev->percent;
		} else
			bv->bv_status = BIOC_SVDEGRADED;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_FAILED:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
		bv->bv_status = BIOC_SVBUILDING;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_MISSING:
	default:
		bv->bv_status = BIOC_SVINVALID;
		break;
	}

	/* firmware volume type to numeric RAID level */
	switch (vpg->volume_type) {
	case MPII_CFG_RAID_VOL_0_TYPE_RAID0:
		bv->bv_level = 0;
		break;
	case MPII_CFG_RAID_VOL_0_TYPE_RAID1:
		bv->bv_level = 1;
		break;
	case MPII_CFG_RAID_VOL_0_TYPE_RAID1E:
		bv->bv_level = 0x1E;
		break;
	case MPII_CFG_RAID_VOL_0_TYPE_RAID10:
		bv->bv_level = 10;
		break;
	default:
		bv->bv_level = -1;
	}

	/* count the hot spares assigned to this volume's pool */
	if ((rv = mpii_bio_hs(sc, NULL, 0, vpg->hot_spare_pool, &hcnt)) != 0) {
		free(vpg, M_TEMP, pagelen);
		return (rv);
	}

	bv->bv_nodisk = vpg->num_phys_disks + hcnt;

	bv->bv_size = letoh64(vpg->max_lba) * lemtoh16(&vpg->block_size);

	/* report the scsi device attached to this volume, if any */
	lnk = scsi_get_link(sc->sc_scsibus, dev->slot, 0);
	if (lnk != NULL) {
		scdev = lnk->device_softc;
		strlcpy(bv->bv_dev, scdev->dv_xname, sizeof(bv->bv_dev));
	}

	free(vpg, M_TEMP, pagelen);
	return (0);
}
3479
3480 int
mpii_ioctl_disk(struct mpii_softc * sc,struct bioc_disk * bd)3481 mpii_ioctl_disk(struct mpii_softc *sc, struct bioc_disk *bd)
3482 {
3483 struct mpii_cfg_raid_vol_pg0 *vpg;
3484 struct mpii_cfg_raid_vol_pg0_physdisk *pd;
3485 struct mpii_cfg_hdr hdr;
3486 struct mpii_device *dev;
3487 size_t pagelen;
3488 u_int16_t volh;
3489 u_int8_t dn;
3490
3491 DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_disk %d/%d\n",
3492 DEVNAME(sc), bd->bd_volid, bd->bd_diskid);
3493
3494 if ((dev = mpii_find_vol(sc, bd->bd_volid)) == NULL)
3495 return (ENODEV);
3496 volh = dev->dev_handle;
3497
3498 if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3499 MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
3500 printf("%s: unable to fetch header for raid volume page 0\n",
3501 DEVNAME(sc));
3502 return (EINVAL);
3503 }
3504
3505 pagelen = hdr.page_length * 4;
3506 vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
3507 if (vpg == NULL) {
3508 printf("%s: unable to allocate space for raid "
3509 "volume page 0\n", DEVNAME(sc));
3510 return (ENOMEM);
3511 }
3512
3513 if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
3514 &hdr, 1, vpg, pagelen) != 0) {
3515 printf("%s: unable to fetch raid volume page 0\n",
3516 DEVNAME(sc));
3517 free(vpg, M_TEMP, pagelen);
3518 return (EINVAL);
3519 }
3520
3521 if (bd->bd_diskid >= vpg->num_phys_disks) {
3522 int nvdsk = vpg->num_phys_disks;
3523 int hsmap = vpg->hot_spare_pool;
3524
3525 free(vpg, M_TEMP, pagelen);
3526 return (mpii_bio_hs(sc, bd, nvdsk, hsmap, NULL));
3527 }
3528
3529 pd = (struct mpii_cfg_raid_vol_pg0_physdisk *)(vpg + 1) +
3530 bd->bd_diskid;
3531 dn = pd->phys_disk_num;
3532
3533 free(vpg, M_TEMP, pagelen);
3534 return (mpii_bio_disk(sc, bd, dn));
3535 }
3536
/*
 * Walk the active RAID configuration looking for hot spares in pool
 * 'hsmap'.  If 'bd' is non-NULL and its bd_diskid falls into the hot
 * spare range (i.e. at or past the 'nvdsk' volume members), fill it in
 * via mpii_bio_disk() for the matching spare.  If 'hscnt' is non-NULL
 * the total number of spares found in the pool is returned through it.
 */
int
mpii_bio_hs(struct mpii_softc *sc, struct bioc_disk *bd, int nvdsk,
    int hsmap, int *hscnt)
{
	struct mpii_cfg_raid_config_pg0 *cpg;
	struct mpii_raid_config_element *el;
	struct mpii_ecfg_hdr ehdr;
	size_t pagelen;
	int i, nhs = 0;

	if (bd)
		DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs %d\n", DEVNAME(sc),
		    bd->bd_diskid - nvdsk);
	else
		DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs\n", DEVNAME(sc));

	/* raid config page 0 is an extended config page */
	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_CONFIG,
	    0, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG, MPII_PG_EXTENDED,
	    &ehdr) != 0) {
		printf("%s: unable to fetch header for raid config page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* ext_page_length is little endian, in 32-bit words */
	pagelen = lemtoh16(&ehdr.ext_page_length) * 4;
	cpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (cpg == NULL) {
		printf("%s: unable to allocate space for raid config page 0\n",
		    DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG,
	    MPII_PG_EXTENDED, &ehdr, 1, cpg, pagelen) != 0) {
		printf("%s: unable to fetch raid config page 0\n",
		    DEVNAME(sc));
		free(cpg, M_TEMP, pagelen);
		return (EINVAL);
	}

	/* config elements immediately follow the page header */
	el = (struct mpii_raid_config_element *)(cpg + 1);
	for (i = 0; i < cpg->num_elements; i++, el++) {
		if (ISSET(lemtoh16(&el->element_flags),
		    MPII_RAID_CONFIG_ELEMENT_FLAG_HSP_PHYS_DISK) &&
		    el->hot_spare_pool == hsmap) {
			/*
			 * diskid comparison is based on the idea that all
			 * disks are counted by the bio(4) in sequence, thus
			 * subtracting the number of disks in the volume
			 * from the diskid yields us a "relative" hotspare
			 * number, which is good enough for us.
			 */
			if (bd != NULL && bd->bd_diskid == nhs + nvdsk) {
				u_int8_t dn = el->phys_disk_num;

				free(cpg, M_TEMP, pagelen);
				return (mpii_bio_disk(sc, bd, dn));
			}
			nhs++;
		}
	}

	if (hscnt)
		*hscnt = nhs;

	free(cpg, M_TEMP, pagelen);
	return (0);
}
3605
/*
 * Fill in a bioc_disk for physical disk number 'dn' from RAID physical
 * disk page 0: target id, state, size, vendor/product and serial
 * strings.  Returns 0 on success or a positive errno.
 */
int
mpii_bio_disk(struct mpii_softc *sc, struct bioc_disk *bd, u_int8_t dn)
{
	struct mpii_cfg_raid_physdisk_pg0 *ppg;
	struct mpii_cfg_hdr hdr;
	struct mpii_device *dev;
	int len;

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_disk %d\n", DEVNAME(sc),
	    bd->bd_diskid);

	ppg = malloc(sizeof(*ppg), M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (ppg == NULL) {
		printf("%s: unable to allocate space for raid physical disk "
		    "page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	/* build the config header by hand instead of querying for it */
	hdr.page_version = 0;
	hdr.page_length = sizeof(*ppg) / 4;	/* in 32-bit words */
	hdr.page_number = 0;
	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_RAID_PD;

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_PHYS_DISK_ADDR_NUMBER | dn, 0,
	    &hdr, 1, ppg, sizeof(*ppg)) != 0) {
		printf("%s: unable to fetch raid drive page 0\n",
		    DEVNAME(sc));
		free(ppg, M_TEMP, sizeof(*ppg));
		return (EINVAL);
	}

	bd->bd_target = ppg->phys_disk_num;

	/* no device behind the handle: report the slot as invalid */
	if ((dev = mpii_find_dev(sc, lemtoh16(&ppg->dev_handle))) == NULL) {
		bd->bd_status = BIOC_SDINVALID;
		free(ppg, M_TEMP, sizeof(*ppg));
		return (0);
	}

	/* map the firmware disk state onto a bio(4) status */
	switch (ppg->phys_disk_state) {
	case MPII_CFG_RAID_PHYDISK_0_STATE_ONLINE:
	case MPII_CFG_RAID_PHYDISK_0_STATE_OPTIMAL:
		bd->bd_status = BIOC_SDONLINE;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_OFFLINE:
		/* distinguish a failed disk from one taken offline */
		if (ppg->offline_reason ==
		    MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILED ||
		    ppg->offline_reason ==
		    MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILEDREQ)
			bd->bd_status = BIOC_SDFAILED;
		else
			bd->bd_status = BIOC_SDOFFLINE;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_DEGRADED:
		bd->bd_status = BIOC_SDFAILED;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_REBUILDING:
		bd->bd_status = BIOC_SDREBUILD;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_HOTSPARE:
		bd->bd_status = BIOC_SDHOTSPARE;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCONFIGURED:
		bd->bd_status = BIOC_SDUNUSED;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCOMPATIBLE:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	bd->bd_size = letoh64(ppg->dev_max_lba) * lemtoh16(&ppg->block_size);

	/* "vendor product" concatenated into bd_vendor with one space */
	scsi_strvis(bd->bd_vendor, ppg->vendor_id, sizeof(ppg->vendor_id));
	len = strlen(bd->bd_vendor);
	bd->bd_vendor[len] = ' ';
	/*
	 * NOTE(review): assumes bd_vendor is large enough for vendor_id
	 * plus a space plus product_id — verify against biovar.h.
	 */
	scsi_strvis(&bd->bd_vendor[len + 1], ppg->product_id,
	    sizeof(ppg->product_id));
	scsi_strvis(bd->bd_serial, ppg->serial, sizeof(ppg->serial));

	free(ppg, M_TEMP, sizeof(*ppg));
	return (0);
}
3689
3690 struct mpii_device *
mpii_find_vol(struct mpii_softc * sc,int volid)3691 mpii_find_vol(struct mpii_softc *sc, int volid)
3692 {
3693 struct mpii_device *dev = NULL;
3694
3695 if (sc->sc_vd_id_low + volid >= sc->sc_max_devices)
3696 return (NULL);
3697 dev = sc->sc_devs[sc->sc_vd_id_low + volid];
3698 if (dev && ISSET(dev->flags, MPII_DF_VOLUME))
3699 return (dev);
3700 return (NULL);
3701 }
3702
3703 #ifndef SMALL_KERNEL
3704 /*
3705 * Non-sleeping lightweight version of the mpii_ioctl_vol
3706 */
3707 int
mpii_bio_volstate(struct mpii_softc * sc,struct bioc_vol * bv)3708 mpii_bio_volstate(struct mpii_softc *sc, struct bioc_vol *bv)
3709 {
3710 struct mpii_cfg_raid_vol_pg0 *vpg;
3711 struct mpii_cfg_hdr hdr;
3712 struct mpii_device *dev = NULL;
3713 size_t pagelen;
3714 u_int16_t volh;
3715
3716 if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL)
3717 return (ENODEV);
3718 volh = dev->dev_handle;
3719
3720 if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3721 MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, MPII_PG_POLL, &hdr) != 0) {
3722 DNPRINTF(MPII_D_MISC, "%s: unable to fetch header for raid "
3723 "volume page 0\n", DEVNAME(sc));
3724 return (EINVAL);
3725 }
3726
3727 pagelen = hdr.page_length * 4;
3728 vpg = malloc(pagelen, M_TEMP, M_NOWAIT | M_ZERO);
3729 if (vpg == NULL) {
3730 DNPRINTF(MPII_D_MISC, "%s: unable to allocate space for raid "
3731 "volume page 0\n", DEVNAME(sc));
3732 return (ENOMEM);
3733 }
3734
3735 if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh,
3736 MPII_PG_POLL, &hdr, 1, vpg, pagelen) != 0) {
3737 DNPRINTF(MPII_D_MISC, "%s: unable to fetch raid volume "
3738 "page 0\n", DEVNAME(sc));
3739 free(vpg, M_TEMP, pagelen);
3740 return (EINVAL);
3741 }
3742
3743 switch (vpg->volume_state) {
3744 case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
3745 case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
3746 bv->bv_status = BIOC_SVONLINE;
3747 break;
3748 case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
3749 if (ISSET(lemtoh32(&vpg->volume_status),
3750 MPII_CFG_RAID_VOL_0_STATUS_RESYNC))
3751 bv->bv_status = BIOC_SVREBUILD;
3752 else
3753 bv->bv_status = BIOC_SVDEGRADED;
3754 break;
3755 case MPII_CFG_RAID_VOL_0_STATE_FAILED:
3756 bv->bv_status = BIOC_SVOFFLINE;
3757 break;
3758 case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
3759 bv->bv_status = BIOC_SVBUILDING;
3760 break;
3761 case MPII_CFG_RAID_VOL_0_STATE_MISSING:
3762 default:
3763 bv->bv_status = BIOC_SVINVALID;
3764 break;
3765 }
3766
3767 free(vpg, M_TEMP, pagelen);
3768 return (0);
3769 }
3770
3771 int
mpii_create_sensors(struct mpii_softc * sc)3772 mpii_create_sensors(struct mpii_softc *sc)
3773 {
3774 struct scsibus_softc *ssc = sc->sc_scsibus;
3775 struct device *dev;
3776 struct scsi_link *link;
3777 int i;
3778
3779 sc->sc_sensors = mallocarray(sc->sc_vd_count, sizeof(struct ksensor),
3780 M_DEVBUF, M_NOWAIT | M_ZERO);
3781 if (sc->sc_sensors == NULL)
3782 return (1);
3783 sc->sc_nsensors = sc->sc_vd_count;
3784
3785 strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
3786 sizeof(sc->sc_sensordev.xname));
3787
3788 for (i = 0; i < sc->sc_vd_count; i++) {
3789 link = scsi_get_link(ssc, i + sc->sc_vd_id_low, 0);
3790 if (link == NULL)
3791 goto bad;
3792
3793 dev = link->device_softc;
3794
3795 sc->sc_sensors[i].type = SENSOR_DRIVE;
3796 sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
3797
3798 strlcpy(sc->sc_sensors[i].desc, dev->dv_xname,
3799 sizeof(sc->sc_sensors[i].desc));
3800
3801 sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
3802 }
3803
3804 if (sensor_task_register(sc, mpii_refresh_sensors, 10) == NULL)
3805 goto bad;
3806
3807 sensordev_install(&sc->sc_sensordev);
3808
3809 return (0);
3810
3811 bad:
3812 free(sc->sc_sensors, M_DEVBUF, 0);
3813
3814 return (1);
3815 }
3816
3817 void
mpii_refresh_sensors(void * arg)3818 mpii_refresh_sensors(void *arg)
3819 {
3820 struct mpii_softc *sc = arg;
3821 struct bioc_vol bv;
3822 int i;
3823
3824 for (i = 0; i < sc->sc_nsensors; i++) {
3825 memset(&bv, 0, sizeof(bv));
3826 bv.bv_volid = i;
3827 if (mpii_bio_volstate(sc, &bv))
3828 return;
3829 switch(bv.bv_status) {
3830 case BIOC_SVOFFLINE:
3831 sc->sc_sensors[i].value = SENSOR_DRIVE_FAIL;
3832 sc->sc_sensors[i].status = SENSOR_S_CRIT;
3833 break;
3834 case BIOC_SVDEGRADED:
3835 sc->sc_sensors[i].value = SENSOR_DRIVE_PFAIL;
3836 sc->sc_sensors[i].status = SENSOR_S_WARN;
3837 break;
3838 case BIOC_SVREBUILD:
3839 sc->sc_sensors[i].value = SENSOR_DRIVE_REBUILD;
3840 sc->sc_sensors[i].status = SENSOR_S_WARN;
3841 break;
3842 case BIOC_SVONLINE:
3843 sc->sc_sensors[i].value = SENSOR_DRIVE_ONLINE;
3844 sc->sc_sensors[i].status = SENSOR_S_OK;
3845 break;
3846 case BIOC_SVINVALID:
3847 /* FALLTHROUGH */
3848 default:
3849 sc->sc_sensors[i].value = 0; /* unknown */
3850 sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
3851 }
3852 }
3853 }
3854 #endif /* SMALL_KERNEL */
3855 #endif /* NBIO > 0 */
3856