1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2009 Yahoo! Inc.
5 * Copyright (c) 2011-2015 LSI Corp.
6 * Copyright (c) 2013-2015 Avago Technologies
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
31 */
32
33 #include <sys/cdefs.h>
34 /* Communications core for Avago Technologies (LSI) MPT2 */
35
36 /* TODO Move headers to mpsvar */
37 #include <sys/types.h>
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/kernel.h>
41 #include <sys/selinfo.h>
42 #include <sys/lock.h>
43 #include <sys/mutex.h>
44 #include <sys/module.h>
45 #include <sys/bus.h>
46 #include <sys/conf.h>
47 #include <sys/bio.h>
48 #include <sys/malloc.h>
49 #include <sys/uio.h>
50 #include <sys/sysctl.h>
51 #include <sys/smp.h>
52 #include <sys/queue.h>
53 #include <sys/kthread.h>
54 #include <sys/taskqueue.h>
55 #include <sys/endian.h>
56 #include <sys/eventhandler.h>
57 #include <sys/sbuf.h>
58 #include <sys/priv.h>
59
60 #include <machine/bus.h>
61 #include <machine/resource.h>
62 #include <sys/rman.h>
63 #include <sys/proc.h>
64
65 #include <dev/pci/pcivar.h>
66
67 #include <cam/cam.h>
68 #include <cam/scsi/scsi_all.h>
69
70 #include <dev/mps/mpi/mpi2_type.h>
71 #include <dev/mps/mpi/mpi2.h>
72 #include <dev/mps/mpi/mpi2_ioc.h>
73 #include <dev/mps/mpi/mpi2_sas.h>
74 #include <dev/mps/mpi/mpi2_cnfg.h>
75 #include <dev/mps/mpi/mpi2_init.h>
76 #include <dev/mps/mpi/mpi2_tool.h>
77 #include <dev/mps/mps_ioctl.h>
78 #include <dev/mps/mpsvar.h>
79 #include <dev/mps/mps_table.h>
80
81 static int mps_diag_reset(struct mps_softc *sc, int sleep_flag);
82 static int mps_init_queues(struct mps_softc *sc);
83 static void mps_resize_queues(struct mps_softc *sc);
84 static int mps_message_unit_reset(struct mps_softc *sc, int sleep_flag);
85 static int mps_transition_operational(struct mps_softc *sc);
86 static int mps_iocfacts_allocate(struct mps_softc *sc, uint8_t attaching);
87 static void mps_iocfacts_free(struct mps_softc *sc);
88 static void mps_startup(void *arg);
89 static int mps_send_iocinit(struct mps_softc *sc);
90 static int mps_alloc_queues(struct mps_softc *sc);
91 static int mps_alloc_hw_queues(struct mps_softc *sc);
92 static int mps_alloc_replies(struct mps_softc *sc);
93 static int mps_alloc_requests(struct mps_softc *sc);
94 static int mps_attach_log(struct mps_softc *sc);
95 static __inline void mps_complete_command(struct mps_softc *sc,
96 struct mps_command *cm);
97 static void mps_dispatch_event(struct mps_softc *sc, uintptr_t data,
98 MPI2_EVENT_NOTIFICATION_REPLY *reply);
99 static void mps_config_complete(struct mps_softc *sc, struct mps_command *cm);
100 static void mps_periodic(void *);
101 static int mps_reregister_events(struct mps_softc *sc);
102 static void mps_enqueue_request(struct mps_softc *sc, struct mps_command *cm);
103 static int mps_get_iocfacts(struct mps_softc *sc, MPI2_IOC_FACTS_REPLY *facts);
104 static int mps_wait_db_ack(struct mps_softc *sc, int timeout, int sleep_flag);
105 static int mps_debug_sysctl(SYSCTL_HANDLER_ARGS);
106 static int mps_dump_reqs(SYSCTL_HANDLER_ARGS);
107 static void mps_parse_debug(struct mps_softc *sc, char *list);
108
109 SYSCTL_NODE(_hw, OID_AUTO, mps, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
110 "MPS Driver Parameters");
111
112 MALLOC_DEFINE(M_MPT2, "mps", "mpt2 driver memory");
113 MALLOC_DECLARE(M_MPSUSER);
114
115 /*
116 * Do a "Diagnostic Reset" aka a hard reset. This should get the chip out of
117 * any state and back to its initialization state machine.
118 */
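/*
 * The bytes below are the MPI 2.0 WriteSequence key values (a flush
 * byte followed by the six unlock keys). Writing them in order unlocks
 * the host diagnostic register so that MPI2_DIAG_RESET_ADAPTER is
 * honored.
 */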
119 static char mpt2_reset_magic[] = { 0x00, 0x0f, 0x04, 0x0b, 0x02, 0x07, 0x0d };
120
121 /* This union is used to smoothly convert cm->cm_desc.Words for le64toh.
122 * The compiler only supports a uint64_t being passed as the argument;
123 * otherwise it throws the error
124 * "aggregate value used where an integer was expected".
125 */
126
127 typedef union {
128 u64 word;
129 struct {
130 u32 low;
131 u32 high;
132 } u;
133 } request_descriptor_t;
134
135 /* Rate limit chain-fail messages to 1 per minute */
136 static struct timeval mps_chainfail_interval = { 60, 0 };
137
138 /*
139 * sleep_flag can be either CAN_SLEEP or NO_SLEEP.
140 * If this function is called from process context, it can sleep and
141 * there is no harm in sleeping; if this function is called from an
142 * interrupt handler, we cannot sleep and need the NO_SLEEP flag set.
143 * Based on the sleep flag the driver will call msleep, pause, or DELAY.
144 * msleep and pause are variants of the same thing, but pause is used
145 * when mps_mtx is not held by the driver.
146 *
147 */
148 static int
149 mps_diag_reset(struct mps_softc *sc, int sleep_flag)
150 {
151 uint32_t reg;
152 int i, error, tries = 0;
153 uint8_t first_wait_done = FALSE;
154
155 mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);
156
157 /* Clear any pending interrupts */
158 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
159
160 /*
161 * Force NO_SLEEP for threads that are prohibited from sleeping,
162 * e.g. threads running from an interrupt handler.
163 */
164 if (curthread->td_no_sleeping != 0)
165 sleep_flag = NO_SLEEP;
166
167 mps_dprint(sc, MPS_INIT, "sequence start, sleep_flag= %d\n", sleep_flag);
168
169 /* Push the magic sequence */
170 error = ETIMEDOUT;
171 while (tries++ < 20) {
172 for (i = 0; i < sizeof(mpt2_reset_magic); i++)
173 mps_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET,
174 mpt2_reset_magic[i]);
175 /* wait 100 msec */
176 if (mtx_owned(&sc->mps_mtx) && sleep_flag == CAN_SLEEP)
177 msleep(&sc->msleep_fake_chan, &sc->mps_mtx, 0,
178 "mpsdiag", hz/10);
179 else if (sleep_flag == CAN_SLEEP)
180 pause("mpsdiag", hz/10);
181 else
182 DELAY(100 * 1000);
183
184 reg = mps_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET);
185 if (reg & MPI2_DIAG_DIAG_WRITE_ENABLE) {
186 error = 0;
187 break;
188 }
189 }
190 if (error) {
191 mps_dprint(sc, MPS_INIT, "sequence failed, error=%d, exit\n",
192 error);
193 return (error);
194 }
195
196 /* Send the actual reset. XXX need to refresh the reg? */
197 reg |= MPI2_DIAG_RESET_ADAPTER;
198 mps_dprint(sc, MPS_INIT, "sequence success, sending reset, reg= 0x%x\n",
199 reg);
200 mps_regwrite(sc, MPI2_HOST_DIAGNOSTIC_OFFSET, reg);
201
202 /* Wait up to 300 seconds in 50ms intervals */
203 error = ETIMEDOUT;
204 for (i = 0; i < 6000; i++) {
205 /*
206 * Wait 50 msec. If this is the first time through, wait 256
207 * msec to satisfy Diag Reset timing requirements.
208 */
209 if (first_wait_done) {
210 if (mtx_owned(&sc->mps_mtx) && sleep_flag == CAN_SLEEP)
211 msleep(&sc->msleep_fake_chan, &sc->mps_mtx, 0,
212 "mpsdiag", hz/20);
213 else if (sleep_flag == CAN_SLEEP)
214 pause("mpsdiag", hz/20);
215 else
216 DELAY(50 * 1000);
217 } else {
218 DELAY(256 * 1000);
219 first_wait_done = TRUE;
220 }
221 /*
222 * Check for the RESET_ADAPTER bit to be cleared first, then
223 * wait for the RESET state to be cleared, which takes a little
224 * longer.
225 */
226 reg = mps_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET);
227 if (reg & MPI2_DIAG_RESET_ADAPTER) {
228 continue;
229 }
230 reg = mps_regread(sc, MPI2_DOORBELL_OFFSET);
231 if ((reg & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_RESET) {
232 error = 0;
233 break;
234 }
235 }
236 if (error) {
237 mps_dprint(sc, MPS_INIT, "reset failed, error= %d, exit\n",
238 error);
239 return (error);
240 }
241
242 mps_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET, 0x0);
243 mps_dprint(sc, MPS_INIT, "diag reset success, exit\n");
244
245 return (0);
246 }
247
248 static int
249 mps_message_unit_reset(struct mps_softc *sc, int sleep_flag)
250 {
251 int error;
252
253 MPS_FUNCTRACE(sc);
254
255 mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);
256
257 error = 0;
258 mps_regwrite(sc, MPI2_DOORBELL_OFFSET,
259 MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET <<
260 MPI2_DOORBELL_FUNCTION_SHIFT);
261
262 if (mps_wait_db_ack(sc, 5, sleep_flag) != 0) {
263 mps_dprint(sc, MPS_INIT|MPS_FAULT,
264 "Doorbell handshake failed\n");
265 error = ETIMEDOUT;
266 }
267
268 mps_dprint(sc, MPS_INIT, "%s exit\n", __func__);
269 return (error);
270 }
271
272 static int
273 mps_transition_ready(struct mps_softc *sc)
274 {
275 uint32_t reg, state;
276 int error, tries = 0;
277 int sleep_flags;
278
279 MPS_FUNCTRACE(sc);
280 /* If we are in attach call, do not sleep */
281 sleep_flags = (sc->mps_flags & MPS_FLAGS_ATTACH_DONE)
282 ? CAN_SLEEP:NO_SLEEP;
283 error = 0;
284
285 mps_dprint(sc, MPS_INIT, "%s entered, sleep_flags= %d\n",
286 __func__, sleep_flags);
287
288 while (tries++ < 1200) {
289 reg = mps_regread(sc, MPI2_DOORBELL_OFFSET);
290 mps_dprint(sc, MPS_INIT, " Doorbell= 0x%x\n", reg);
291
292 /*
293 * Ensure the IOC is ready to talk. If it's not, try
294 * resetting it.
295 */
296 if (reg & MPI2_DOORBELL_USED) {
297 mps_dprint(sc, MPS_INIT, " Not ready, sending diag "
298 "reset\n");
299 mps_diag_reset(sc, sleep_flags);
300 DELAY(50000);
301 continue;
302 }
303
304 /* Is the adapter owned by another peer? */
305 if ((reg & MPI2_DOORBELL_WHO_INIT_MASK) ==
306 (MPI2_WHOINIT_PCI_PEER << MPI2_DOORBELL_WHO_INIT_SHIFT)) {
307 mps_dprint(sc, MPS_INIT|MPS_FAULT, "IOC is under the "
308 "control of another peer host, aborting "
309 "initialization.\n");
310 error = ENXIO;
311 break;
312 }
313
314 state = reg & MPI2_IOC_STATE_MASK;
315 if (state == MPI2_IOC_STATE_READY) {
316 /* Ready to go! */
317 error = 0;
318 break;
319 } else if (state == MPI2_IOC_STATE_FAULT) {
320 mps_dprint(sc, MPS_INIT|MPS_FAULT, "IOC in fault "
321 "state 0x%x, resetting\n",
322 state & MPI2_DOORBELL_FAULT_CODE_MASK);
323 mps_diag_reset(sc, sleep_flags);
324 } else if (state == MPI2_IOC_STATE_OPERATIONAL) {
325 /* Need to take ownership */
326 mps_message_unit_reset(sc, sleep_flags);
327 } else if (state == MPI2_IOC_STATE_RESET) {
328 /* Wait a bit, IOC might be in transition */
329 mps_dprint(sc, MPS_INIT|MPS_FAULT,
330 "IOC in unexpected reset state\n");
331 } else {
332 mps_dprint(sc, MPS_INIT|MPS_FAULT,
333 "IOC in unknown state 0x%x\n", state);
334 error = EINVAL;
335 break;
336 }
337
338 /* Wait 50ms for things to settle down. */
339 DELAY(50000);
340 }
341
342 if (error)
343 mps_dprint(sc, MPS_INIT|MPS_FAULT,
344 "Cannot transition IOC to ready\n");
345 mps_dprint(sc, MPS_INIT, "%s exit\n", __func__);
346
347 return (error);
348 }
349
350 static int
351 mps_transition_operational(struct mps_softc *sc)
352 {
353 uint32_t reg, state;
354 int error;
355
356 MPS_FUNCTRACE(sc);
357
358 error = 0;
359 reg = mps_regread(sc, MPI2_DOORBELL_OFFSET);
360 mps_dprint(sc, MPS_INIT, "%s entered, Doorbell= 0x%x\n", __func__, reg);
361
362 state = reg & MPI2_IOC_STATE_MASK;
363 if (state != MPI2_IOC_STATE_READY) {
364 mps_dprint(sc, MPS_INIT, "IOC not ready\n");
365 if ((error = mps_transition_ready(sc)) != 0) {
366 mps_dprint(sc, MPS_INIT|MPS_FAULT,
367 "failed to transition ready, exit\n");
368 return (error);
369 }
370 }
371
372 error = mps_send_iocinit(sc);
373 mps_dprint(sc, MPS_INIT, "%s exit\n", __func__);
374
375 return (error);
376 }
377
378 static void
379 mps_resize_queues(struct mps_softc *sc)
380 {
381 u_int reqcr, prireqcr, maxio, sges_per_frame;
382
383 /*
384 * Size the queues. Since the reply queues always need one free
385 * entry, we'll deduct one reply message here. The LSI documents
386 * suggest instead to add a count to the request queue, but I think
387 * that it's better to deduct from the reply queue.
388 */
389 prireqcr = MAX(1, sc->max_prireqframes);
390 prireqcr = MIN(prireqcr, sc->facts->HighPriorityCredit);
391
392 reqcr = MAX(2, sc->max_reqframes);
393 reqcr = MIN(reqcr, sc->facts->RequestCredit);
394
395 sc->num_reqs = prireqcr + reqcr;
396 sc->num_prireqs = prireqcr;
397 sc->num_replies = MIN(sc->max_replyframes + sc->max_evtframes,
398 sc->facts->MaxReplyDescriptorPostQueueDepth) - 1;
399
400 /* Store the request frame size in bytes rather than as 32bit words */
401 sc->reqframesz = sc->facts->IOCRequestFrameSize * 4;
402
403 /*
404 * Max IO Size is Page Size * the following:
405 * ((SGEs per frame - 1 for chain element) * Max Chain Depth)
406 * + 1 for no chain needed in last frame
407 *
408 * If user suggests a Max IO size to use, use the smaller of the
409 * user's value and the calculated value as long as the user's
410 * value is larger than 0. The user's value is in pages.
411 */
412 sges_per_frame = sc->reqframesz / sizeof(MPI2_SGE_SIMPLE64) - 1;
413 maxio = (sges_per_frame * sc->facts->MaxChainDepth + 1) * PAGE_SIZE;
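/*
 * Illustrative example only (real values come from IOC Facts): with a
 * 128-byte request frame and 12-byte MPI2_SGE_SIMPLE64 entries,
 * sges_per_frame = 128 / 12 - 1 = 9; a MaxChainDepth of 128 and 4KB
 * pages would then give maxio = (9 * 128 + 1) * 4096, roughly 4.5MB.
 */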
414
415 /*
416 * If I/O size limitation requested, then use it and pass up to CAM.
417 * If not, use maxphys as an optimization hint, but report HW limit.
418 */
419 if (sc->max_io_pages > 0) {
420 maxio = min(maxio, sc->max_io_pages * PAGE_SIZE);
421 sc->maxio = maxio;
422 } else {
423 sc->maxio = maxio;
424 maxio = min(maxio, maxphys);
425 }
426
427 sc->num_chains = (maxio / PAGE_SIZE + sges_per_frame - 2) /
428 sges_per_frame * reqcr;
429 if (sc->max_chains > 0 && sc->max_chains < sc->num_chains)
430 sc->num_chains = sc->max_chains;
431
432 /*
433 * Figure out the number of MSIx-based queues. If the firmware or
434 * user has done something crazy and not allowed enough credit for
435 * the queues to be useful then don't enable multi-queue.
436 */
437 if (sc->facts->MaxMSIxVectors < 2)
438 sc->msi_msgs = 1;
439
440 if (sc->msi_msgs > 1) {
441 sc->msi_msgs = MIN(sc->msi_msgs, mp_ncpus);
442 sc->msi_msgs = MIN(sc->msi_msgs, sc->facts->MaxMSIxVectors);
443 if (sc->num_reqs / sc->msi_msgs < 2)
444 sc->msi_msgs = 1;
445 }
446
447 mps_dprint(sc, MPS_INIT, "Sized queues to q=%d reqs=%d replies=%d\n",
448 sc->msi_msgs, sc->num_reqs, sc->num_replies);
449 }
450
451 /*
452 * This is called during attach and when re-initializing due to a Diag Reset.
453 * IOC Facts is used to allocate many of the structures needed by the driver.
454 * If called from attach, de-allocation is not required because the driver has
455 * not allocated any structures yet, but if called from a Diag Reset, previously
456 * allocated structures based on IOC Facts will need to be freed and re-
457 * allocated based on the latest IOC Facts.
458 */
459 static int
460 mps_iocfacts_allocate(struct mps_softc *sc, uint8_t attaching)
461 {
462 int error;
463 Mpi2IOCFactsReply_t saved_facts;
464 uint8_t saved_mode, reallocating;
465
466 mps_dprint(sc, MPS_INIT|MPS_TRACE, "%s entered\n", __func__);
467
468 /* Save old IOC Facts and then only reallocate if Facts have changed */
469 if (!attaching) {
470 bcopy(sc->facts, &saved_facts, sizeof(MPI2_IOC_FACTS_REPLY));
471 }
472
473 /*
474 * Get IOC Facts. In all cases throughout this function, panic if doing
475 * a re-initialization and only return the error if attaching so the OS
476 * can handle it.
477 */
478 if ((error = mps_get_iocfacts(sc, sc->facts)) != 0) {
479 if (attaching) {
480 mps_dprint(sc, MPS_INIT|MPS_FAULT, "Failed to get "
481 "IOC Facts with error %d, exit\n", error);
482 return (error);
483 } else {
484 panic("%s failed to get IOC Facts with error %d\n",
485 __func__, error);
486 }
487 }
488
489 MPS_DPRINT_PAGE(sc, MPS_XINFO, iocfacts, sc->facts);
490
491 snprintf(sc->fw_version, sizeof(sc->fw_version),
492 "%02d.%02d.%02d.%02d",
493 sc->facts->FWVersion.Struct.Major,
494 sc->facts->FWVersion.Struct.Minor,
495 sc->facts->FWVersion.Struct.Unit,
496 sc->facts->FWVersion.Struct.Dev);
497
498 snprintf(sc->msg_version, sizeof(sc->msg_version), "%d.%d",
499 (sc->facts->MsgVersion & MPI2_IOCFACTS_MSGVERSION_MAJOR_MASK) >>
500 MPI2_IOCFACTS_MSGVERSION_MAJOR_SHIFT,
501 (sc->facts->MsgVersion & MPI2_IOCFACTS_MSGVERSION_MINOR_MASK) >>
502 MPI2_IOCFACTS_MSGVERSION_MINOR_SHIFT);
503
504 mps_dprint(sc, MPS_INFO, "Firmware: %s, Driver: %s\n", sc->fw_version,
505 MPS_DRIVER_VERSION);
506 mps_dprint(sc, MPS_INFO, "IOCCapabilities: %b\n",
507 sc->facts->IOCCapabilities,
508 "\20" "\3ScsiTaskFull" "\4DiagTrace" "\5SnapBuf" "\6ExtBuf"
509 "\7EEDP" "\10BiDirTarg" "\11Multicast" "\14TransRetry" "\15IR"
510 "\16EventReplay" "\17RaidAccel" "\20MSIXIndex" "\21HostDisc");
511
512 /*
513 * If the chip doesn't support event replay then a hard reset will be
514 * required to trigger a full discovery. Do the reset here then
515 * retransition to Ready. A hard reset might have already been done,
516 * but it doesn't hurt to do it again. Only do this if attaching, not
517 * for a Diag Reset.
518 */
519 if (attaching && ((sc->facts->IOCCapabilities &
520 MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY) == 0)) {
521 mps_dprint(sc, MPS_INIT, "No event replay, resetting\n");
522 mps_diag_reset(sc, NO_SLEEP);
523 if ((error = mps_transition_ready(sc)) != 0) {
524 mps_dprint(sc, MPS_INIT|MPS_FAULT, "Failed to "
525 "transition to ready with error %d, exit\n",
526 error);
527 return (error);
528 }
529 }
530
531 /*
532 * Set flag if IR Firmware is loaded. If the RAID Capability has
533 * changed from the previous IOC Facts, log a warning, but only if
534 * checking this after a Diag Reset and not during attach.
535 */
536 saved_mode = sc->ir_firmware;
537 if (sc->facts->IOCCapabilities &
538 MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)
539 sc->ir_firmware = 1;
540 if (!attaching) {
541 if (sc->ir_firmware != saved_mode) {
542 mps_dprint(sc, MPS_INIT|MPS_FAULT, "new IR/IT mode "
543 "in IOC Facts does not match previous mode\n");
544 }
545 }
546
547 /* Only deallocate and reallocate if relevant IOC Facts have changed */
548 reallocating = FALSE;
549 sc->mps_flags &= ~MPS_FLAGS_REALLOCATED;
550
551 if ((!attaching) &&
552 ((saved_facts.MsgVersion != sc->facts->MsgVersion) ||
553 (saved_facts.HeaderVersion != sc->facts->HeaderVersion) ||
554 (saved_facts.MaxChainDepth != sc->facts->MaxChainDepth) ||
555 (saved_facts.RequestCredit != sc->facts->RequestCredit) ||
556 (saved_facts.ProductID != sc->facts->ProductID) ||
557 (saved_facts.IOCCapabilities != sc->facts->IOCCapabilities) ||
558 (saved_facts.IOCRequestFrameSize !=
559 sc->facts->IOCRequestFrameSize) ||
560 (saved_facts.MaxTargets != sc->facts->MaxTargets) ||
561 (saved_facts.MaxSasExpanders != sc->facts->MaxSasExpanders) ||
562 (saved_facts.MaxEnclosures != sc->facts->MaxEnclosures) ||
563 (saved_facts.HighPriorityCredit != sc->facts->HighPriorityCredit) ||
564 (saved_facts.MaxReplyDescriptorPostQueueDepth !=
565 sc->facts->MaxReplyDescriptorPostQueueDepth) ||
566 (saved_facts.ReplyFrameSize != sc->facts->ReplyFrameSize) ||
567 (saved_facts.MaxVolumes != sc->facts->MaxVolumes) ||
568 (saved_facts.MaxPersistentEntries !=
569 sc->facts->MaxPersistentEntries))) {
570 reallocating = TRUE;
571
572 /* Record that we reallocated everything */
573 sc->mps_flags |= MPS_FLAGS_REALLOCATED;
574 }
575
576 /*
577 * Some things should be done if attaching or re-allocating after a Diag
578 * Reset, but are not needed after a Diag Reset if the FW has not
579 * changed.
580 */
581 if (attaching || reallocating) {
582 /*
583 * Check if controller supports FW diag buffers and set flag to
584 * enable each type.
585 */
586 if (sc->facts->IOCCapabilities &
587 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
588 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_TRACE].
589 enabled = TRUE;
590 if (sc->facts->IOCCapabilities &
591 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
592 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_SNAPSHOT].
593 enabled = TRUE;
594 if (sc->facts->IOCCapabilities &
595 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
596 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_EXTENDED].
597 enabled = TRUE;
598
599 /*
600 * Set flag if EEDP is supported and if TLR is supported.
601 */
602 if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP)
603 sc->eedp_enabled = TRUE;
604 if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR)
605 sc->control_TLR = TRUE;
606
607 mps_resize_queues(sc);
608
609 /*
610 * Initialize all Tail Queues
611 */
612 TAILQ_INIT(&sc->req_list);
613 TAILQ_INIT(&sc->high_priority_req_list);
614 TAILQ_INIT(&sc->chain_list);
615 TAILQ_INIT(&sc->tm_list);
616 }
617
618 /*
619 * If doing a Diag Reset and the FW is significantly different
620 * (reallocating will be set above in IOC Facts comparison), then all
621 * buffers based on the IOC Facts will need to be freed before they are
622 * reallocated.
623 */
624 if (reallocating) {
625 mps_iocfacts_free(sc);
626 mpssas_realloc_targets(sc, saved_facts.MaxTargets +
627 saved_facts.MaxVolumes);
628 }
629
630 /*
631 * Any deallocation has been completed. Now start reallocating
632 * if needed. Will only need to reallocate if attaching or if the new
633 * IOC Facts are different from the previous IOC Facts after a Diag
634 * Reset. Targets have already been allocated above if needed.
635 */
636 error = 0;
637 while (attaching || reallocating) {
638 if ((error = mps_alloc_hw_queues(sc)) != 0)
639 break;
640 if ((error = mps_alloc_replies(sc)) != 0)
641 break;
642 if ((error = mps_alloc_requests(sc)) != 0)
643 break;
644 if ((error = mps_alloc_queues(sc)) != 0)
645 break;
646
647 break;
648 }
649 if (error) {
650 mps_dprint(sc, MPS_INIT|MPS_FAULT,
651 "Failed to alloc queues with error %d\n", error);
652 mps_free(sc);
653 return (error);
654 }
655
656 /* Always initialize the queues */
657 bzero(sc->free_queue, sc->fqdepth * 4);
658 mps_init_queues(sc);
659
660 /*
661 * Always get the chip out of the reset state, but only panic if not
662 * attaching. If attaching and there is an error, that is handled by
663 * the OS.
664 */
665 error = mps_transition_operational(sc);
666 if (error != 0) {
667 mps_dprint(sc, MPS_INIT|MPS_FAULT, "Failed to "
668 "transition to operational with error %d\n", error);
669 mps_free(sc);
670 return (error);
671 }
672
673 /*
674 * Finish the queue initialization.
675 * These are set here instead of in mps_init_queues() because the
676 * IOC resets these values during the state transition in
677 * mps_transition_operational(). The free index is set to 1
678 * because the corresponding index in the IOC is set to 0, and the
679 * IOC treats the queues as full if both are set to the same value.
680 * Hence the reason that the queue can't hold all of the possible
681 * replies.
682 */
683 sc->replypostindex = 0;
684 mps_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex);
685 mps_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, 0);
686
687 /*
688 * Attach the subsystems so they can prepare their event masks.
689 * XXX Should be dynamic so that IM/IR and user modules can attach
690 */
691 error = 0;
692 while (attaching) {
693 mps_dprint(sc, MPS_INIT, "Attaching subsystems\n");
694 if ((error = mps_attach_log(sc)) != 0)
695 break;
696 if ((error = mps_attach_sas(sc)) != 0)
697 break;
698 if ((error = mps_attach_user(sc)) != 0)
699 break;
700 break;
701 }
702 if (error) {
703 mps_dprint(sc, MPS_INIT|MPS_FAULT, "Failed to attach all "
704 "subsystems: error %d\n", error);
705 mps_free(sc);
706 return (error);
707 }
708
709 /*
710 * XXX If the number of MSI-X vectors changes during re-init, this
711 * won't see it and adjust.
712 */
713 if (attaching && (error = mps_pci_setup_interrupts(sc)) != 0) {
714 mps_dprint(sc, MPS_INIT|MPS_FAULT, "Failed to setup "
715 "interrupts\n");
716 mps_free(sc);
717 return (error);
718 }
719
720 /*
721 * Set flag if this is a WD controller. This shouldn't ever change, but
722 * reset it after a Diag Reset, just in case.
723 */
724 sc->WD_available = FALSE;
725 if (pci_get_device(sc->mps_dev) == MPI2_MFGPAGE_DEVID_SSS6200)
726 sc->WD_available = TRUE;
727
728 return (error);
729 }
730
731 /*
732 * This is called when memory is being freed (during detach, for example) and
733 * buffers need to be reallocated due to a Diag Reset.
734 */
735 static void
736 mps_iocfacts_free(struct mps_softc *sc)
737 {
738 struct mps_command *cm;
739 int i;
740
741 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
742
743 if (sc->free_busaddr != 0)
744 bus_dmamap_unload(sc->queues_dmat, sc->queues_map);
745 if (sc->free_queue != NULL)
746 bus_dmamem_free(sc->queues_dmat, sc->free_queue,
747 sc->queues_map);
748 if (sc->queues_dmat != NULL)
749 bus_dma_tag_destroy(sc->queues_dmat);
750
751 if (sc->chain_frames != NULL) {
752 bus_dmamap_unload(sc->chain_dmat, sc->chain_map);
753 bus_dmamem_free(sc->chain_dmat, sc->chain_frames,
754 sc->chain_map);
755 }
756 if (sc->chain_dmat != NULL)
757 bus_dma_tag_destroy(sc->chain_dmat);
758
759 if (sc->sense_busaddr != 0)
760 bus_dmamap_unload(sc->sense_dmat, sc->sense_map);
761 if (sc->sense_frames != NULL)
762 bus_dmamem_free(sc->sense_dmat, sc->sense_frames,
763 sc->sense_map);
764 if (sc->sense_dmat != NULL)
765 bus_dma_tag_destroy(sc->sense_dmat);
766
767 if (sc->reply_busaddr != 0)
768 bus_dmamap_unload(sc->reply_dmat, sc->reply_map);
769 if (sc->reply_frames != NULL)
770 bus_dmamem_free(sc->reply_dmat, sc->reply_frames,
771 sc->reply_map);
772 if (sc->reply_dmat != NULL)
773 bus_dma_tag_destroy(sc->reply_dmat);
774
775 if (sc->req_busaddr != 0)
776 bus_dmamap_unload(sc->req_dmat, sc->req_map);
777 if (sc->req_frames != NULL)
778 bus_dmamem_free(sc->req_dmat, sc->req_frames, sc->req_map);
779 if (sc->req_dmat != NULL)
780 bus_dma_tag_destroy(sc->req_dmat);
781
782 if (sc->chains != NULL)
783 free(sc->chains, M_MPT2);
784 if (sc->commands != NULL) {
785 for (i = 1; i < sc->num_reqs; i++) {
786 cm = &sc->commands[i];
787 bus_dmamap_destroy(sc->buffer_dmat, cm->cm_dmamap);
788 }
789 free(sc->commands, M_MPT2);
790 }
791 if (sc->buffer_dmat != NULL)
792 bus_dma_tag_destroy(sc->buffer_dmat);
793
794 mps_pci_free_interrupts(sc);
795 free(sc->queues, M_MPT2);
796 sc->queues = NULL;
797 }
798
799 /*
800 * The terms diag reset and hard reset are used interchangeably in the MPI
801 * docs to mean resetting the controller chip. In this code diag reset
802 * cleans everything up, and the hard reset function just sends the reset
803 * sequence to the chip. This should probably be refactored so that every
804 * subsystem gets a reset notification of some sort, and can clean up
805 * appropriately.
806 */
807 int
808 mps_reinit(struct mps_softc *sc)
809 {
810 int error;
811 struct mpssas_softc *sassc;
812
813 sassc = sc->sassc;
814
815 MPS_FUNCTRACE(sc);
816
817 mtx_assert(&sc->mps_mtx, MA_OWNED);
818
819 mps_dprint(sc, MPS_INIT|MPS_INFO, "Reinitializing controller\n");
820 if (sc->mps_flags & MPS_FLAGS_DIAGRESET) {
821 mps_dprint(sc, MPS_INIT, "Reset already in progress\n");
822 return 0;
823 }
824
825 /* make sure the completion callbacks can recognize they're getting
826 * a NULL cm_reply due to a reset.
827 */
828 sc->mps_flags |= MPS_FLAGS_DIAGRESET;
829
830 /*
831 * Mask interrupts here.
832 */
833 mps_dprint(sc, MPS_INIT, "masking interrupts and resetting\n");
834 mps_mask_intr(sc);
835
836 error = mps_diag_reset(sc, CAN_SLEEP);
837 if (error != 0) {
838 /* XXXSL No need to panic here */
839 panic("%s hard reset failed with error %d\n",
840 __func__, error);
841 }
842
843 /* Restore the PCI state, including the MSI-X registers */
844 mps_pci_restore(sc);
845
846 /* Give the I/O subsystem special priority to get itself prepared */
847 mpssas_handle_reinit(sc);
848
849 /*
850 * Get IOC Facts and allocate all structures based on this information.
851 * The attach function will also call mps_iocfacts_allocate at startup.
852 * If relevant values have changed in IOC Facts, this function will free
853 * all of the memory based on IOC Facts and reallocate that memory.
854 */
855 if ((error = mps_iocfacts_allocate(sc, FALSE)) != 0) {
856 panic("%s IOC Facts based allocation failed with error %d\n",
857 __func__, error);
858 }
859
860 /*
861 * Mapping structures will be re-allocated after getting IOC Page8, so
862 * free these structures here.
863 */
864 mps_mapping_exit(sc);
865
866 /*
867 * The static page function currently read is IOC Page8. Others can be
868 * added in future. It's possible that the values in IOC Page8 have
869 * changed after a Diag Reset due to user modification, so always read
870 * these. Interrupts are masked, so unmask them before getting config
871 * pages.
872 */
873 mps_unmask_intr(sc);
874 sc->mps_flags &= ~MPS_FLAGS_DIAGRESET;
875 mps_base_static_config_pages(sc);
876
877 /*
878 * Some mapping info is based in IOC Page8 data, so re-initialize the
879 * mapping tables.
880 */
881 mps_mapping_initialize(sc);
882
883 /*
884 * Restart will reload the event masks clobbered by the reset, and
885 * then enable the port.
886 */
887 mps_reregister_events(sc);
888
889 /* the end of discovery will release the simq, so we're done. */
890 mps_dprint(sc, MPS_INIT|MPS_XINFO, "Finished sc %p post %u free %u\n",
891 sc, sc->replypostindex, sc->replyfreeindex);
892
893 mpssas_release_simq_reinit(sassc);
894 mps_dprint(sc, MPS_INIT, "%s exit\n", __func__);
895
896 return 0;
897 }
898
899 /* Wait for the chip to ACK a word that we've put into its FIFO.
900 * Wait for <timeout> seconds. In the NO_SLEEP case, each loop
901 * iteration busy-waits for 500 microseconds, so the total is
902 * [ 0.5 * (2000 * <timeout>) ] milliseconds.
903 */
904 static int
905 mps_wait_db_ack(struct mps_softc *sc, int timeout, int sleep_flag)
906 {
907
908 u32 cntdn, count;
909 u32 int_status;
910 u32 doorbell;
911
912 count = 0;
913 cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
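/*
 * cntdn is sized so that the loop spans roughly <timeout> seconds:
 * 1000 iterations of ~1ms sleeps, or 2000 iterations of 500us
 * busy-waits, per second of timeout.
 */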
914 do {
915 int_status = mps_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET);
916 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
917 mps_dprint(sc, MPS_TRACE,
918 "%s: successful count(%d), timeout(%d)\n",
919 __func__, count, timeout);
920 return 0;
921 } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
922 doorbell = mps_regread(sc, MPI2_DOORBELL_OFFSET);
923 if ((doorbell & MPI2_IOC_STATE_MASK) ==
924 MPI2_IOC_STATE_FAULT) {
925 mps_dprint(sc, MPS_FAULT,
926 "fault_state(0x%04x)!\n", doorbell);
927 return (EFAULT);
928 }
929 } else if (int_status == 0xFFFFFFFF)
930 goto out;
931
932 /* If it can sleep, sleep for 1 millisecond, else busy loop for
933 * 0.5 millisecond */
934 if (mtx_owned(&sc->mps_mtx) && sleep_flag == CAN_SLEEP)
935 msleep(&sc->msleep_fake_chan, &sc->mps_mtx, 0,
936 "mpsdba", hz/1000);
937 else if (sleep_flag == CAN_SLEEP)
938 pause("mpsdba", hz/1000);
939 else
940 DELAY(500);
941 count++;
942 } while (--cntdn);
943
944 out:
945 mps_dprint(sc, MPS_FAULT, "%s: failed due to timeout count(%d), "
946 "int_status(%x)!\n", __func__, count, int_status);
947 return (ETIMEDOUT);
948
949 }
950
951 /* Wait for the chip to signal that the next word in its FIFO can be fetched */
952 static int
953 mps_wait_db_int(struct mps_softc *sc)
954 {
955 int retry;
956
957 for (retry = 0; retry < MPS_DB_MAX_WAIT; retry++) {
958 if ((mps_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET) &
959 MPI2_HIS_IOC2SYS_DB_STATUS) != 0)
960 return (0);
961 DELAY(2000);
962 }
963 return (ETIMEDOUT);
964 }
965
966 /* Step through the synchronous command state machine, i.e. "Doorbell mode" */
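/*
 * Sketch of the handshake implemented below: the host pushes the
 * request through the doorbell register one 32-bit word at a time,
 * waiting for the IOC to ACK each word, then pulls the reply back one
 * 16-bit word at a time, acknowledging each word by clearing the host
 * interrupt status register. The Step N comments below follow that
 * sequence.
 */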
967 static int
968 mps_request_sync(struct mps_softc *sc, void *req, MPI2_DEFAULT_REPLY *reply,
969 int req_sz, int reply_sz, int timeout)
970 {
971 uint32_t *data32;
972 uint16_t *data16;
973 int i, count, ioc_sz, residual;
974 int sleep_flags = CAN_SLEEP;
975
976 if (curthread->td_no_sleeping != 0)
977 sleep_flags = NO_SLEEP;
978
979 /* Step 1 */
980 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
981
982 /* Step 2 */
983 if (mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
984 return (EBUSY);
985
986 /* Step 3
987 * Announce that a message is coming through the doorbell. Messages
988 * are pushed at 32bit words, so round up if needed.
989 */
990 count = (req_sz + 3) / 4;
991 mps_regwrite(sc, MPI2_DOORBELL_OFFSET,
992 (MPI2_FUNCTION_HANDSHAKE << MPI2_DOORBELL_FUNCTION_SHIFT) |
993 (count << MPI2_DOORBELL_ADD_DWORDS_SHIFT));
994
995 /* Step 4 */
996 if (mps_wait_db_int(sc) ||
997 (mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) == 0) {
998 mps_dprint(sc, MPS_FAULT, "Doorbell failed to activate\n");
999 return (ENXIO);
1000 }
1001 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
1002 if (mps_wait_db_ack(sc, 5, sleep_flags) != 0) {
1003 mps_dprint(sc, MPS_FAULT, "Doorbell handshake failed\n");
1004 return (ENXIO);
1005 }
1006
1007 /* Step 5 */
1008 /* Clock out the message data synchronously in 32-bit dwords*/
1009 data32 = (uint32_t *)req;
1010 for (i = 0; i < count; i++) {
1011 mps_regwrite(sc, MPI2_DOORBELL_OFFSET, htole32(data32[i]));
1012 if (mps_wait_db_ack(sc, 5, sleep_flags) != 0) {
1013 mps_dprint(sc, MPS_FAULT,
1014 "Timeout while writing doorbell\n");
1015 return (ENXIO);
1016 }
1017 }
1018
1019 /* Step 6 */
1020 /* Clock in the reply in 16-bit words. The total length of the
1021 * message is always in the 4th byte, so clock in the first 2 words
1022 * manually, then loop the rest.
1023 */
1024 data16 = (uint16_t *)reply;
1025 if (mps_wait_db_int(sc) != 0) {
1026 mps_dprint(sc, MPS_FAULT, "Timeout reading doorbell 0\n");
1027 return (ENXIO);
1028 }
1029 data16[0] =
1030 mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK;
1031 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
1032 if (mps_wait_db_int(sc) != 0) {
1033 mps_dprint(sc, MPS_FAULT, "Timeout reading doorbell 1\n");
1034 return (ENXIO);
1035 }
1036 data16[1] =
1037 mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK;
1038 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
1039
1040 /* Number of 32bit words in the message */
1041 ioc_sz = reply->MsgLength;
1042
1043 /*
1044 * Figure out how many 16bit words to clock in without overrunning.
1045 * The precision loss with dividing reply_sz can safely be
1046 * ignored because the messages can only be multiples of 32bits.
1047 */
1048 residual = 0;
1049 count = MIN((reply_sz / 4), ioc_sz) * 2;
1050 if (count < ioc_sz * 2) {
1051 residual = ioc_sz * 2 - count;
1052 mps_dprint(sc, MPS_ERROR, "Driver error, throwing away %d "
1053 "residual message words\n", residual);
1054 }
1055
1056 for (i = 2; i < count; i++) {
1057 if (mps_wait_db_int(sc) != 0) {
1058 mps_dprint(sc, MPS_FAULT,
1059 "Timeout reading doorbell %d\n", i);
1060 return (ENXIO);
1061 }
1062 data16[i] = mps_regread(sc, MPI2_DOORBELL_OFFSET) &
1063 MPI2_DOORBELL_DATA_MASK;
1064 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
1065 }
1066
1067 /*
1068 * Pull out residual words that won't fit into the provided buffer.
1069 * This keeps the chip from hanging due to a driver programming
1070 * error.
1071 */
1072 while (residual--) {
1073 if (mps_wait_db_int(sc) != 0) {
1074 mps_dprint(sc, MPS_FAULT,
1075 "Timeout reading doorbell\n");
1076 return (ENXIO);
1077 }
1078 (void)mps_regread(sc, MPI2_DOORBELL_OFFSET);
1079 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
1080 }
1081
1082 /* Step 7 */
1083 if (mps_wait_db_int(sc) != 0) {
1084 mps_dprint(sc, MPS_FAULT, "Timeout waiting to exit doorbell\n");
1085 return (ENXIO);
1086 }
1087 if (mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
1088 mps_dprint(sc, MPS_FAULT, "Warning, doorbell still active\n");
1089 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
1090
1091 return (0);
1092 }
1093
1094 static void
1095 mps_enqueue_request(struct mps_softc *sc, struct mps_command *cm)
1096 {
1097 request_descriptor_t rd;
1098 MPS_FUNCTRACE(sc);
1099 mps_dprint(sc, MPS_TRACE, "SMID %u cm %p ccb %p\n",
1100 cm->cm_desc.Default.SMID, cm, cm->cm_ccb);
1101
1102 if (sc->mps_flags & MPS_FLAGS_ATTACH_DONE && !(sc->mps_flags & MPS_FLAGS_SHUTDOWN))
1103 mtx_assert(&sc->mps_mtx, MA_OWNED);
1104
1105 if (++sc->io_cmds_active > sc->io_cmds_highwater)
1106 sc->io_cmds_highwater++;
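/*
 * Build the 64-bit request descriptor in host order, byte-swap it to
 * little-endian as a single 64-bit quantity, and post it to the chip
 * as two 32-bit register writes, low word first.
 */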
1107 rd.u.low = cm->cm_desc.Words.Low;
1108 rd.u.high = cm->cm_desc.Words.High;
1109 rd.word = htole64(rd.word);
1110
1111 KASSERT(cm->cm_state == MPS_CM_STATE_BUSY,
1112 ("command not busy, state = %u\n", cm->cm_state));
1113 cm->cm_state = MPS_CM_STATE_INQUEUE;
1114
1115 /* TODO-We may need to make below regwrite atomic */
1116 mps_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET,
1117 rd.u.low);
1118 mps_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET,
1119 rd.u.high);
1120 }
1121
1122 /*
1123 * Just the FACTS, ma'am.
1124 */
1125 static int
1126 mps_get_iocfacts(struct mps_softc *sc, MPI2_IOC_FACTS_REPLY *facts)
1127 {
1128 MPI2_DEFAULT_REPLY *reply;
1129 MPI2_IOC_FACTS_REQUEST request;
1130 int error, req_sz, reply_sz;
1131
1132 MPS_FUNCTRACE(sc);
1133 mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);
1134
1135 req_sz = sizeof(MPI2_IOC_FACTS_REQUEST);
1136 reply_sz = sizeof(MPI2_IOC_FACTS_REPLY);
1137 reply = (MPI2_DEFAULT_REPLY *)facts;
1138
1139 bzero(&request, req_sz);
1140 request.Function = MPI2_FUNCTION_IOC_FACTS;
1141 error = mps_request_sync(sc, &request, reply, req_sz, reply_sz, 5);
1142 mps_dprint(sc, MPS_INIT, "%s exit error= %d\n", __func__, error);
1143
1144 return (error);
1145 }
1146
1147 static int
1148 mps_send_iocinit(struct mps_softc *sc)
1149 {
1150 MPI2_IOC_INIT_REQUEST init;
1151 MPI2_DEFAULT_REPLY reply;
1152 int req_sz, reply_sz, error;
1153 struct timeval now;
1154 uint64_t time_in_msec;
1155
1156 MPS_FUNCTRACE(sc);
1157 mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);
1158
1159 /* Do a quick sanity check on proper initialization */
1160 if ((sc->pqdepth == 0) || (sc->fqdepth == 0) || (sc->reqframesz == 0)
1161 || (sc->replyframesz == 0)) {
1162 mps_dprint(sc, MPS_INIT|MPS_ERROR,
1163 "Driver not fully initialized for IOCInit\n");
1164 return (EINVAL);
1165 }
1166
1167 req_sz = sizeof(MPI2_IOC_INIT_REQUEST);
1168 reply_sz = sizeof(MPI2_IOC_INIT_REPLY);
1169 bzero(&init, req_sz);
1170 bzero(&reply, reply_sz);
1171
1172 /*
1173 * Fill in the init block. Note that most addresses are
1174 * deliberately in the lower 32bits of memory. This is a micro-
1175 * optimization for PCI/PCI-X, though it's not clear if it helps PCIe.
1176 */
1177 init.Function = MPI2_FUNCTION_IOC_INIT;
1178 init.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
1179 init.MsgVersion = htole16(MPI2_VERSION);
1180 init.HeaderVersion = htole16(MPI2_HEADER_VERSION);
1181 init.SystemRequestFrameSize = htole16((uint16_t)(sc->reqframesz / 4));
1182 init.ReplyDescriptorPostQueueDepth = htole16(sc->pqdepth);
1183 init.ReplyFreeQueueDepth = htole16(sc->fqdepth);
1184 init.SenseBufferAddressHigh = 0;
1185 init.SystemReplyAddressHigh = 0;
1186 init.SystemRequestFrameBaseAddress.High = 0;
1187 init.SystemRequestFrameBaseAddress.Low = htole32((uint32_t)sc->req_busaddr);
1188 init.ReplyDescriptorPostQueueAddress.High = 0;
1189 init.ReplyDescriptorPostQueueAddress.Low = htole32((uint32_t)sc->post_busaddr);
1190 init.ReplyFreeQueueAddress.High = 0;
1191 init.ReplyFreeQueueAddress.Low = htole32((uint32_t)sc->free_busaddr);
1192 getmicrotime(&now);
1193 time_in_msec = (now.tv_sec * 1000 + now.tv_usec/1000);
1194 init.TimeStamp.High = htole32((time_in_msec >> 32) & 0xFFFFFFFF);
1195 init.TimeStamp.Low = htole32(time_in_msec & 0xFFFFFFFF);
1196
1197 error = mps_request_sync(sc, &init, &reply, req_sz, reply_sz, 5);
1198 if ((reply.IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
1199 error = ENXIO;
1200
1201 mps_dprint(sc, MPS_INIT, "IOCInit status= 0x%x\n", reply.IOCStatus);
1202 mps_dprint(sc, MPS_INIT, "%s exit\n", __func__);
1203 return (error);
1204 }
1205
1206 void
1207 mps_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1208 {
1209 bus_addr_t *addr;
1210
1211 addr = arg;
1212 *addr = segs[0].ds_addr;
1213 }
1214
1215 void
1216 mps_memaddr_wait_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1217 {
1218 struct mps_busdma_context *ctx;
1219 int need_unload, need_free;
1220
1221 ctx = (struct mps_busdma_context *)arg;
1222 need_unload = 0;
1223 need_free = 0;
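/*
 * This callback can run after the original caller has timed out and
 * abandoned the context. In that case there is no waiter left to
 * consume the mapping, so unload it and free the context here rather
 * than waking anyone up.
 */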
1224
1225 mps_lock(ctx->softc);
1226 ctx->error = error;
1227 ctx->completed = 1;
1228 if ((error == 0) && (ctx->abandoned == 0)) {
1229 *ctx->addr = segs[0].ds_addr;
1230 } else {
1231 if (nsegs != 0)
1232 need_unload = 1;
1233 if (ctx->abandoned != 0)
1234 need_free = 1;
1235 }
1236 if (need_free == 0)
1237 wakeup(ctx);
1238
1239 mps_unlock(ctx->softc);
1240
1241 if (need_unload != 0) {
1242 bus_dmamap_unload(ctx->buffer_dmat,
1243 ctx->buffer_dmamap);
1244 *ctx->addr = 0;
1245 }
1246
1247 if (need_free != 0)
1248 free(ctx, M_MPSUSER);
1249 }
1250
1251 static int
1252 mps_alloc_queues(struct mps_softc *sc)
1253 {
1254 struct mps_queue *q;
1255 u_int nq, i;
1256
1257 nq = sc->msi_msgs;
1258 mps_dprint(sc, MPS_INIT|MPS_XINFO, "Allocating %d I/O queues\n", nq);
1259
1260 sc->queues = malloc(sizeof(struct mps_queue) * nq, M_MPT2,
1261 M_NOWAIT|M_ZERO);
1262 if (sc->queues == NULL)
1263 return (ENOMEM);
1264
1265 for (i = 0; i < nq; i++) {
1266 q = &sc->queues[i];
1267 mps_dprint(sc, MPS_INIT, "Configuring queue %d %p\n", i, q);
1268 q->sc = sc;
1269 q->qnum = i;
1270 }
1271
1272 return (0);
1273 }
1274
1275 static int
1276 mps_alloc_hw_queues(struct mps_softc *sc)
1277 {
1278 bus_dma_template_t t;
1279 bus_addr_t queues_busaddr;
1280 uint8_t *queues;
1281 int qsize, fqsize, pqsize;
1282
1283 /*
1284 * The reply free queue contains 4 byte entries in multiples of 16 and
1285 * aligned on a 16 byte boundary. There must always be an unused entry.
1286 * This queue supplies fresh reply frames for the firmware to use.
1287 *
1288 * The reply descriptor post queue contains 8 byte entries in
1289 * multiples of 16 and aligned on a 16 byte boundary. This queue
1290 * contains filled-in reply frames sent from the firmware to the host.
1291 *
1292 * These two queues are allocated together for simplicity.
1293 */
1294 sc->fqdepth = roundup2(sc->num_replies + 1, 16);
1295 sc->pqdepth = roundup2(sc->num_replies + 1, 16);
1296 fqsize = sc->fqdepth * 4;
1297 pqsize = sc->pqdepth * 8;
1298 qsize = fqsize + pqsize;
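/*
 * Illustrative sizing (numbers are examples only): num_replies = 1015
 * rounds both depths up to 1024, giving a 4KB free queue and an 8KB
 * post queue carved from a single 12KB, 16-byte-aligned allocation.
 */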
1299
1300 bus_dma_template_init(&t, sc->mps_parent_dmat);
1301 BUS_DMA_TEMPLATE_FILL(&t, BD_ALIGNMENT(16), BD_MAXSIZE(qsize),
1302 BD_MAXSEGSIZE(qsize), BD_NSEGMENTS(1),
1303 BD_LOWADDR(BUS_SPACE_MAXADDR_32BIT));
1304 if (bus_dma_template_tag(&t, &sc->queues_dmat)) {
1305 mps_dprint(sc, MPS_ERROR, "Cannot allocate queues DMA tag\n");
1306 return (ENOMEM);
1307 }
1308 if (bus_dmamem_alloc(sc->queues_dmat, (void **)&queues, BUS_DMA_NOWAIT,
1309 &sc->queues_map)) {
1310 mps_dprint(sc, MPS_ERROR, "Cannot allocate queues memory\n");
1311 return (ENOMEM);
1312 }
1313 bzero(queues, qsize);
1314 bus_dmamap_load(sc->queues_dmat, sc->queues_map, queues, qsize,
1315 mps_memaddr_cb, &queues_busaddr, 0);
1316
1317 sc->free_queue = (uint32_t *)queues;
1318 sc->free_busaddr = queues_busaddr;
1319 sc->post_queue = (MPI2_REPLY_DESCRIPTORS_UNION *)(queues + fqsize);
1320 sc->post_busaddr = queues_busaddr + fqsize;
1321 mps_dprint(sc, MPS_INIT, "free queue busaddr= %#016jx size= %d\n",
1322 (uintmax_t)sc->free_busaddr, fqsize);
1323 mps_dprint(sc, MPS_INIT, "reply queue busaddr= %#016jx size= %d\n",
1324 (uintmax_t)sc->post_busaddr, pqsize);
1325
1326 return (0);
1327 }
1328
1329 static int
1330 mps_alloc_replies(struct mps_softc *sc)
1331 {
1332 bus_dma_template_t t;
1333 int rsize, num_replies;
1334
1335 /* Store the reply frame size in bytes rather than as 32bit words */
1336 sc->replyframesz = sc->facts->ReplyFrameSize * 4;
1337
1338 /*
1339 * sc->num_replies should be one less than sc->fqdepth. We need to
1340 * allocate space for sc->fqdepth replies, but only sc->num_replies
1341 * replies can be used at once.
1342 */
1343 num_replies = max(sc->fqdepth, sc->num_replies);
1344
1345 rsize = sc->replyframesz * num_replies;
1346 bus_dma_template_init(&t, sc->mps_parent_dmat);
1347 BUS_DMA_TEMPLATE_FILL(&t, BD_ALIGNMENT(4), BD_MAXSIZE(rsize),
1348 BD_MAXSEGSIZE(rsize), BD_NSEGMENTS(1),
1349 BD_LOWADDR(BUS_SPACE_MAXADDR_32BIT));
1350 if (bus_dma_template_tag(&t, &sc->reply_dmat)) {
1351 mps_dprint(sc, MPS_ERROR, "Cannot allocate replies DMA tag\n");
1352 return (ENOMEM);
1353 }
1354 if (bus_dmamem_alloc(sc->reply_dmat, (void **)&sc->reply_frames,
1355 BUS_DMA_NOWAIT, &sc->reply_map)) {
1356 mps_dprint(sc, MPS_ERROR, "Cannot allocate replies memory\n");
1357 return (ENOMEM);
1358 }
1359 bzero(sc->reply_frames, rsize);
1360 bus_dmamap_load(sc->reply_dmat, sc->reply_map, sc->reply_frames, rsize,
1361 mps_memaddr_cb, &sc->reply_busaddr, 0);
1362
1363 mps_dprint(sc, MPS_INIT, "reply frames busaddr= %#016jx size= %d\n",
1364 (uintmax_t)sc->reply_busaddr, rsize);
1365
1366 return (0);
1367 }
1368
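/*
 * Callback for loading the chain frame DMA memory. Walk each returned
 * DMA segment, carve it into reqframesz-sized chain frames, and place
 * each frame on the free chain list; a tail fragment too small for a
 * full frame is skipped, with the virtual offset adjusted to match.
 */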
1369 static void
1370 mps_load_chains_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1371 {
1372 struct mps_softc *sc = arg;
1373 struct mps_chain *chain;
1374 bus_size_t bo;
1375 int i, o, s;
1376
1377 if (error != 0)
1378 return;
1379
1380 for (i = 0, o = 0, s = 0; s < nsegs; s++) {
1381 KASSERT(segs[s].ds_addr + segs[s].ds_len - 1 <= BUS_SPACE_MAXADDR_32BIT,
1382 ("mps: Bad segment address %#jx len %#jx\n", (uintmax_t)segs[s].ds_addr,
1383 (uintmax_t)segs[s].ds_len));
1384 for (bo = 0; bo + sc->reqframesz <= segs[s].ds_len;
1385 bo += sc->reqframesz) {
1386 chain = &sc->chains[i++];
1387 chain->chain =(MPI2_SGE_IO_UNION *)(sc->chain_frames+o);
1388 chain->chain_busaddr = segs[s].ds_addr + bo;
1389 o += sc->reqframesz;
1390 mps_free_chain(sc, chain);
1391 }
1392 if (bo != segs[s].ds_len)
1393 o += segs[s].ds_len - bo;
1394 }
1395 sc->chain_free_lowwater = i;
1396 }
1397
1398 static int
1399 mps_alloc_requests(struct mps_softc *sc)
1400 {
1401 bus_dma_template_t t;
1402 struct mps_command *cm;
1403 int i, rsize, nsegs;
1404
1405 rsize = sc->reqframesz * sc->num_reqs;
1406 bus_dma_template_init(&t, sc->mps_parent_dmat);
1407 BUS_DMA_TEMPLATE_FILL(&t, BD_ALIGNMENT(16), BD_MAXSIZE(rsize),
1408 BD_MAXSEGSIZE(rsize), BD_NSEGMENTS(1),
1409 BD_LOWADDR(BUS_SPACE_MAXADDR_32BIT));
1410 if (bus_dma_template_tag(&t, &sc->req_dmat)) {
1411 mps_dprint(sc, MPS_ERROR, "Cannot allocate request DMA tag\n");
1412 return (ENOMEM);
1413 }
1414 if (bus_dmamem_alloc(sc->req_dmat, (void **)&sc->req_frames,
1415 BUS_DMA_NOWAIT, &sc->req_map)) {
1416 mps_dprint(sc, MPS_ERROR, "Cannot allocate request memory\n");
1417 return (ENOMEM);
1418 }
1419 bzero(sc->req_frames, rsize);
1420 bus_dmamap_load(sc->req_dmat, sc->req_map, sc->req_frames, rsize,
1421 mps_memaddr_cb, &sc->req_busaddr, 0);
1422 mps_dprint(sc, MPS_INIT, "request frames busaddr= %#016jx size= %d\n",
1423 (uintmax_t)sc->req_busaddr, rsize);
1424
1425 sc->chains = malloc(sizeof(struct mps_chain) * sc->num_chains, M_MPT2,
1426 M_NOWAIT | M_ZERO);
1427 if (!sc->chains) {
1428 mps_dprint(sc, MPS_ERROR, "Cannot allocate chain memory\n");
1429 return (ENOMEM);
1430 }
1431 rsize = sc->reqframesz * sc->num_chains;
1432 bus_dma_template_clone(&t, sc->req_dmat);
1433 BUS_DMA_TEMPLATE_FILL(&t, BD_MAXSIZE(rsize), BD_MAXSEGSIZE(rsize),
1434 BD_NSEGMENTS(howmany(rsize, PAGE_SIZE)),
1435 BD_BOUNDARY(BUS_SPACE_MAXSIZE_32BIT+1));
1436 if (bus_dma_template_tag(&t, &sc->chain_dmat)) {
1437 mps_dprint(sc, MPS_ERROR, "Cannot allocate chain DMA tag\n");
1438 return (ENOMEM);
1439 }
1440 if (bus_dmamem_alloc(sc->chain_dmat, (void **)&sc->chain_frames,
1441 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->chain_map)) {
1442 mps_dprint(sc, MPS_ERROR, "Cannot allocate chain memory\n");
1443 return (ENOMEM);
1444 }
1445 if (bus_dmamap_load(sc->chain_dmat, sc->chain_map, sc->chain_frames,
1446 rsize, mps_load_chains_cb, sc, BUS_DMA_NOWAIT)) {
1447 mps_dprint(sc, MPS_ERROR, "Cannot load chain memory\n");
1448 bus_dmamem_free(sc->chain_dmat, sc->chain_frames,
1449 sc->chain_map);
1450 return (ENOMEM);
1451 }
1452
1453 rsize = MPS_SENSE_LEN * sc->num_reqs;
1454 bus_dma_template_clone(&t, sc->req_dmat);
1455 BUS_DMA_TEMPLATE_FILL(&t, BD_ALIGNMENT(1), BD_MAXSIZE(rsize),
1456 BD_MAXSEGSIZE(rsize));
1457 if (bus_dma_template_tag(&t, &sc->sense_dmat)) {
1458 mps_dprint(sc, MPS_ERROR, "Cannot allocate sense DMA tag\n");
1459 return (ENOMEM);
1460 }
1461 if (bus_dmamem_alloc(sc->sense_dmat, (void **)&sc->sense_frames,
1462 BUS_DMA_NOWAIT, &sc->sense_map)) {
1463 mps_dprint(sc, MPS_ERROR, "Cannot allocate sense memory\n");
1464 return (ENOMEM);
1465 }
1466 bzero(sc->sense_frames, rsize);
1467 bus_dmamap_load(sc->sense_dmat, sc->sense_map, sc->sense_frames, rsize,
1468 mps_memaddr_cb, &sc->sense_busaddr, 0);
1469 mps_dprint(sc, MPS_INIT, "sense frames busaddr= %#016jx size= %d\n",
1470 (uintmax_t)sc->sense_busaddr, rsize);
1471
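/*
 * Worst case for a maxio-sized I/O: one segment per page, plus one
 * extra segment when the buffer does not start on a page boundary.
 */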
1472 nsegs = (sc->maxio / PAGE_SIZE) + 1;
1473 bus_dma_template_init(&t, sc->mps_parent_dmat);
1474 BUS_DMA_TEMPLATE_FILL(&t, BD_MAXSIZE(BUS_SPACE_MAXSIZE_32BIT),
1475 BD_NSEGMENTS(nsegs), BD_MAXSEGSIZE(BUS_SPACE_MAXSIZE_24BIT),
1476 BD_FLAGS(BUS_DMA_ALLOCNOW), BD_LOCKFUNC(busdma_lock_mutex),
1477 BD_LOCKFUNCARG(&sc->mps_mtx),
1478 BD_BOUNDARY(BUS_SPACE_MAXSIZE_32BIT+1));
1479 if (bus_dma_template_tag(&t, &sc->buffer_dmat)) {
1480 mps_dprint(sc, MPS_ERROR, "Cannot allocate buffer DMA tag\n");
1481 return (ENOMEM);
1482 }
1483
1484 /*
1485 * SMID 0 cannot be used as a free command per the firmware spec.
1486 * Just drop that command instead of risking accounting bugs.
1487 */
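/*
 * Note that cm_desc.Default.SMID is set to the array index below, so
 * commands[0] corresponds to SMID 0; it is allocated but never put on
 * a free list.
 */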
1488 sc->commands = malloc(sizeof(struct mps_command) * sc->num_reqs,
1489 M_MPT2, M_WAITOK | M_ZERO);
1490 for (i = 1; i < sc->num_reqs; i++) {
1491 cm = &sc->commands[i];
1492 cm->cm_req = sc->req_frames + i * sc->reqframesz;
1493 cm->cm_req_busaddr = sc->req_busaddr + i * sc->reqframesz;
1494 cm->cm_sense = &sc->sense_frames[i];
1495 cm->cm_sense_busaddr = sc->sense_busaddr + i * MPS_SENSE_LEN;
1496 cm->cm_desc.Default.SMID = i;
1497 cm->cm_sc = sc;
1498 cm->cm_state = MPS_CM_STATE_BUSY;
1499 TAILQ_INIT(&cm->cm_chain_list);
1500 callout_init_mtx(&cm->cm_callout, &sc->mps_mtx, 0);
1501
1502 /* XXX Is a failure here a critical problem? */
1503 if (bus_dmamap_create(sc->buffer_dmat, 0, &cm->cm_dmamap) == 0)
1504 if (i <= sc->num_prireqs)
1505 mps_free_high_priority_command(sc, cm);
1506 else
1507 mps_free_command(sc, cm);
1508 else {
1509 panic("failed to allocate command %d\n", i);
1510 sc->num_reqs = i;
1511 break;
1512 }
1513 }
1514
1515 return (0);
1516 }
1517
1518 static int
1519 mps_init_queues(struct mps_softc *sc)
1520 {
1521 int i;
1522
1523 memset((uint8_t *)sc->post_queue, 0xff, sc->pqdepth * 8);
1524
1525 /*
1526 * According to the spec, we need to use one less reply than we
1527 * have space for on the queue. So sc->num_replies (the number we
1528 * use) should be less than sc->fqdepth (allocated size).
1529 */
1530 if (sc->num_replies >= sc->fqdepth)
1531 return (EINVAL);
1532
1533 /*
1534 * Initialize all of the free queue entries.
1535 */
1536 for (i = 0; i < sc->fqdepth; i++)
1537 sc->free_queue[i] = sc->reply_busaddr + (i * sc->replyframesz);
1538 sc->replyfreeindex = sc->num_replies;
1539
1540 return (0);
1541 }
1542
1543 /* Get the driver parameter tunables. Lowest priority are the driver defaults.
1544 * Next are the global settings, if they exist. Highest are the per-unit
1545 * settings, if they exist.
1546 */
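/*
 * For example, from loader.conf (values shown are illustrative only):
 *
 *	hw.mps.debug_level="info,fault"
 *	hw.mps.max_chains=4096
 *	dev.mps.0.debug_level="trace"
 *	dev.mps.0.disable_msix=1
 */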
1547 void
1548 mps_get_tunables(struct mps_softc *sc)
1549 {
1550 char tmpstr[80], mps_debug[80];
1551
1552 /* XXX default to some debugging for now */
1553 sc->mps_debug = MPS_INFO|MPS_FAULT;
1554 sc->disable_msix = 0;
1555 sc->disable_msi = 0;
1556 sc->max_msix = MPS_MSIX_MAX;
1557 sc->max_chains = MPS_CHAIN_FRAMES;
1558 sc->max_io_pages = MPS_MAXIO_PAGES;
1559 sc->enable_ssu = MPS_SSU_ENABLE_SSD_DISABLE_HDD;
1560 sc->spinup_wait_time = DEFAULT_SPINUP_WAIT;
1561 sc->use_phynum = 1;
1562 sc->max_reqframes = MPS_REQ_FRAMES;
1563 sc->max_prireqframes = MPS_PRI_REQ_FRAMES;
1564 sc->max_replyframes = MPS_REPLY_FRAMES;
1565 sc->max_evtframes = MPS_EVT_REPLY_FRAMES;
1566
1567 /*
1568 * Grab the global variables.
1569 */
1570 bzero(mps_debug, 80);
1571 if (TUNABLE_STR_FETCH("hw.mps.debug_level", mps_debug, 80) != 0)
1572 mps_parse_debug(sc, mps_debug);
1573 TUNABLE_INT_FETCH("hw.mps.disable_msix", &sc->disable_msix);
1574 TUNABLE_INT_FETCH("hw.mps.disable_msi", &sc->disable_msi);
1575 TUNABLE_INT_FETCH("hw.mps.max_msix", &sc->max_msix);
1576 TUNABLE_INT_FETCH("hw.mps.max_chains", &sc->max_chains);
1577 TUNABLE_INT_FETCH("hw.mps.max_io_pages", &sc->max_io_pages);
1578 TUNABLE_INT_FETCH("hw.mps.enable_ssu", &sc->enable_ssu);
1579 TUNABLE_INT_FETCH("hw.mps.spinup_wait_time", &sc->spinup_wait_time);
1580 TUNABLE_INT_FETCH("hw.mps.use_phy_num", &sc->use_phynum);
1581 TUNABLE_INT_FETCH("hw.mps.max_reqframes", &sc->max_reqframes);
1582 TUNABLE_INT_FETCH("hw.mps.max_prireqframes", &sc->max_prireqframes);
1583 TUNABLE_INT_FETCH("hw.mps.max_replyframes", &sc->max_replyframes);
1584 TUNABLE_INT_FETCH("hw.mps.max_evtframes", &sc->max_evtframes);
1585
1586 /* Grab the unit-instance variables */
1587 snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.debug_level",
1588 device_get_unit(sc->mps_dev));
1589 bzero(mps_debug, 80);
1590 if (TUNABLE_STR_FETCH(tmpstr, mps_debug, 80) != 0)
1591 mps_parse_debug(sc, mps_debug);
1592
1593 snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.disable_msix",
1594 device_get_unit(sc->mps_dev));
1595 TUNABLE_INT_FETCH(tmpstr, &sc->disable_msix);
1596
1597 snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.disable_msi",
1598 device_get_unit(sc->mps_dev));
1599 TUNABLE_INT_FETCH(tmpstr, &sc->disable_msi);
1600
1601 snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.max_msix",
1602 device_get_unit(sc->mps_dev));
1603 TUNABLE_INT_FETCH(tmpstr, &sc->max_msix);
1604
1605 snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.max_chains",
1606 device_get_unit(sc->mps_dev));
1607 TUNABLE_INT_FETCH(tmpstr, &sc->max_chains);
1608
1609 snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.max_io_pages",
1610 device_get_unit(sc->mps_dev));
1611 TUNABLE_INT_FETCH(tmpstr, &sc->max_io_pages);
1612
1613 bzero(sc->exclude_ids, sizeof(sc->exclude_ids));
1614 snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.exclude_ids",
1615 device_get_unit(sc->mps_dev));
1616 TUNABLE_STR_FETCH(tmpstr, sc->exclude_ids, sizeof(sc->exclude_ids));
1617
1618 snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.enable_ssu",
1619 device_get_unit(sc->mps_dev));
1620 TUNABLE_INT_FETCH(tmpstr, &sc->enable_ssu);
1621
1622 snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.spinup_wait_time",
1623 device_get_unit(sc->mps_dev));
1624 TUNABLE_INT_FETCH(tmpstr, &sc->spinup_wait_time);
1625
1626 snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.use_phy_num",
1627 device_get_unit(sc->mps_dev));
1628 TUNABLE_INT_FETCH(tmpstr, &sc->use_phynum);
1629
1630 snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.max_reqframes",
1631 device_get_unit(sc->mps_dev));
1632 TUNABLE_INT_FETCH(tmpstr, &sc->max_reqframes);
1633
1634 snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.max_prireqframes",
1635 device_get_unit(sc->mps_dev));
1636 TUNABLE_INT_FETCH(tmpstr, &sc->max_prireqframes);
1637
1638 snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.max_replyframes",
1639 device_get_unit(sc->mps_dev));
1640 TUNABLE_INT_FETCH(tmpstr, &sc->max_replyframes);
1641
1642 snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.max_evtframes",
1643 device_get_unit(sc->mps_dev));
1644 TUNABLE_INT_FETCH(tmpstr, &sc->max_evtframes);
1645
1646 }
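
/*
 * Illustrative (not from this file): the tunables fetched above can be
 * set in loader.conf(5), globally or per-unit, e.g.:
 *
 *   hw.mps.max_chains="4096"
 *   hw.mps.debug_level="info,error"
 *   dev.mps.0.spinup_wait_time="5"
 *
 * The values shown are arbitrary examples, not recommendations.
 */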
1647
1648 static void
1649 mps_setup_sysctl(struct mps_softc *sc)
1650 {
1651 struct sysctl_ctx_list *sysctl_ctx = NULL;
1652 struct sysctl_oid *sysctl_tree = NULL;
1653 char tmpstr[80], tmpstr2[80];
1654
1655 /*
1656 * Setup the sysctl variable so the user can change the debug level
1657 * on the fly.
1658 */
1659 snprintf(tmpstr, sizeof(tmpstr), "MPS controller %d",
1660 device_get_unit(sc->mps_dev));
1661 snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mps_dev));
1662
1663 sysctl_ctx = device_get_sysctl_ctx(sc->mps_dev);
1664 if (sysctl_ctx != NULL)
1665 sysctl_tree = device_get_sysctl_tree(sc->mps_dev);
1666
1667 if (sysctl_tree == NULL) {
1668 sysctl_ctx_init(&sc->sysctl_ctx);
1669 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
1670 SYSCTL_STATIC_CHILDREN(_hw_mps), OID_AUTO, tmpstr2,
1671 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, tmpstr);
1672 if (sc->sysctl_tree == NULL)
1673 return;
1674 sysctl_ctx = &sc->sysctl_ctx;
1675 sysctl_tree = sc->sysctl_tree;
1676 }
1677
1678 SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1679 OID_AUTO, "debug_level", CTLTYPE_STRING | CTLFLAG_RW |CTLFLAG_MPSAFE,
1680 sc, 0, mps_debug_sysctl, "A", "mps debug level");
1681
1682 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1683 OID_AUTO, "disable_msix", CTLFLAG_RD, &sc->disable_msix, 0,
1684 "Disable the use of MSI-X interrupts");
1685
1686 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1687 OID_AUTO, "disable_msi", CTLFLAG_RD, &sc->disable_msi, 0,
1688 "Disable the use of MSI interrupts");
1689
1690 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1691 OID_AUTO, "max_msix", CTLFLAG_RD, &sc->max_msix, 0,
1692 "User-defined maximum number of MSIX queues");
1693
1694 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1695 OID_AUTO, "msix_msgs", CTLFLAG_RD, &sc->msi_msgs, 0,
1696 "Negotiated number of MSIX queues");
1697
1698 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1699 OID_AUTO, "max_reqframes", CTLFLAG_RD, &sc->max_reqframes, 0,
1700 "Total number of allocated request frames");
1701
1702 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1703 OID_AUTO, "max_prireqframes", CTLFLAG_RD, &sc->max_prireqframes, 0,
1704 "Total number of allocated high priority request frames");
1705
1706 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1707 OID_AUTO, "max_replyframes", CTLFLAG_RD, &sc->max_replyframes, 0,
1708 "Total number of allocated reply frames");
1709
1710 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1711 OID_AUTO, "max_evtframes", CTLFLAG_RD, &sc->max_evtframes, 0,
1712 "Total number of event frames allocated");
1713
1714 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1715 OID_AUTO, "firmware_version", CTLFLAG_RD, sc->fw_version,
1716 strlen(sc->fw_version), "firmware version");
1717
1718 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1719 OID_AUTO, "driver_version", CTLFLAG_RD, MPS_DRIVER_VERSION,
1720 strlen(MPS_DRIVER_VERSION), "driver version");
1721
1722 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1723 OID_AUTO, "msg_version", CTLFLAG_RD, sc->msg_version,
1724 strlen(sc->msg_version), "message interface version (deprecated)");
1725
1726 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1727 OID_AUTO, "io_cmds_active", CTLFLAG_RD,
1728 &sc->io_cmds_active, 0, "number of currently active commands");
1729
1730 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1731 OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
1732 &sc->io_cmds_highwater, 0, "maximum active commands seen");
1733
1734 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1735 OID_AUTO, "chain_free", CTLFLAG_RD,
1736 &sc->chain_free, 0, "number of free chain elements");
1737
1738 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1739 OID_AUTO, "chain_free_lowwater", CTLFLAG_RD,
1740 &sc->chain_free_lowwater, 0,"lowest number of free chain elements");
1741
1742 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1743 OID_AUTO, "max_chains", CTLFLAG_RD,
1744 &sc->max_chains, 0,"maximum chain frames that will be allocated");
1745
1746 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1747 OID_AUTO, "max_io_pages", CTLFLAG_RD,
1748 &sc->max_io_pages, 0,"maximum pages to allow per I/O (if <1 use "
1749 "IOCFacts)");
1750
1751 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1752 OID_AUTO, "enable_ssu", CTLFLAG_RW, &sc->enable_ssu, 0,
1753 "enable SSU to SATA SSD/HDD at shutdown");
1754
1755 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1756 OID_AUTO, "chain_alloc_fail", CTLFLAG_RD,
1757 &sc->chain_alloc_fail, "chain allocation failures");
1758
1759 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1760 OID_AUTO, "spinup_wait_time", CTLFLAG_RD,
1761 &sc->spinup_wait_time, DEFAULT_SPINUP_WAIT, "seconds to wait for "
1762 "spinup after SATA ID error");
1763
1764 SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1765 OID_AUTO, "mapping_table_dump",
1766 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
1767 mps_mapping_dump, "A", "Mapping Table Dump");
1768
1769 SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1770 OID_AUTO, "encl_table_dump",
1771 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
1772 mps_mapping_encl_dump, "A", "Enclosure Table Dump");
1773
1774 SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1775 OID_AUTO, "dump_reqs",
1776 CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE,
1777 sc, 0, mps_dump_reqs, "I", "Dump Active Requests");
1778
1779 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1780 OID_AUTO, "dump_reqs_alltypes", CTLFLAG_RW,
1781 &sc->dump_reqs_alltypes, 0,
1782 "dump all request types not just inqueue");
1783
1784 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
1785 OID_AUTO, "use_phy_num", CTLFLAG_RD, &sc->use_phynum, 0,
1786 "Use the phy number for enumeration");
1787 }
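
/*
 * Illustrative usage (assumed): once the nodes above exist, they can be
 * read and, where marked CTLFLAG_RW, written with sysctl(8), e.g.:
 *
 *   sysctl dev.mps.0.io_cmds_highwater
 *   sysctl dev.mps.0.debug_level=+trace
 */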
1788
1789 static struct mps_debug_string {
1790 char *name;
1791 int flag;
1792 } mps_debug_strings[] = {
1793 {"info", MPS_INFO},
1794 {"fault", MPS_FAULT},
1795 {"event", MPS_EVENT},
1796 {"log", MPS_LOG},
1797 {"recovery", MPS_RECOVERY},
1798 {"error", MPS_ERROR},
1799 {"init", MPS_INIT},
1800 {"xinfo", MPS_XINFO},
1801 {"user", MPS_USER},
1802 {"mapping", MPS_MAPPING},
1803 {"trace", MPS_TRACE}
1804 };
1805
1806 enum mps_debug_level_combiner {
1807 COMB_NONE,
1808 COMB_ADD,
1809 COMB_SUB
1810 };
1811
1812 static int
1813 mps_debug_sysctl(SYSCTL_HANDLER_ARGS)
1814 {
1815 struct mps_softc *sc;
1816 struct mps_debug_string *string;
1817 struct sbuf *sbuf;
1818 char *buffer;
1819 size_t sz;
1820 int i, len, debug, error;
1821
1822 sc = (struct mps_softc *)arg1;
1823
1824 error = sysctl_wire_old_buffer(req, 0);
1825 if (error != 0)
1826 return (error);
1827
1828 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
1829 debug = sc->mps_debug;
1830
1831 sbuf_printf(sbuf, "%#x", debug);
1832
1833 sz = sizeof(mps_debug_strings) / sizeof(mps_debug_strings[0]);
1834 for (i = 0; i < sz; i++) {
1835 string = &mps_debug_strings[i];
1836 if (debug & string->flag)
1837 sbuf_printf(sbuf, ",%s", string->name);
1838 }
1839
1840 error = sbuf_finish(sbuf);
1841 sbuf_delete(sbuf);
1842
1843 if (error || req->newptr == NULL)
1844 return (error);
1845
1846 len = req->newlen - req->newidx;
1847 if (len == 0)
1848 return (0);
1849
1850 buffer = malloc(len, M_MPT2, M_ZERO|M_WAITOK);
1851 error = SYSCTL_IN(req, buffer, len);
1852
1853 mps_parse_debug(sc, buffer);
1854
1855 free(buffer, M_MPT2);
1856 return (error);
1857 }
1858
1859 static void
1860 mps_parse_debug(struct mps_softc *sc, char *list)
1861 {
1862 struct mps_debug_string *string;
1863 enum mps_debug_level_combiner op;
1864 char *token, *endtoken;
1865 size_t sz;
1866 int flags, i;
1867
1868 if (list == NULL || *list == '\0')
1869 return;
1870
1871 if (*list == '+') {
1872 op = COMB_ADD;
1873 list++;
1874 } else if (*list == '-') {
1875 op = COMB_SUB;
1876 list++;
1877 } else
1878 op = COMB_NONE;
1879 if (*list == '\0')
1880 return;
1881
1882 flags = 0;
1883 sz = sizeof(mps_debug_strings) / sizeof(mps_debug_strings[0]);
1884 while ((token = strsep(&list, ":,")) != NULL) {
1885 /* Handle integer flags */
1886 flags |= strtol(token, &endtoken, 0);
1887 if (token != endtoken)
1888 continue;
1889
1890 /* Handle text flags */
1891 for (i = 0; i < sz; i++) {
1892 string = &mps_debug_strings[i];
1893 if (strcasecmp(token, string->name) == 0) {
1894 flags |= string->flag;
1895 break;
1896 }
1897 }
1898 }
1899
1900 switch (op) {
1901 case COMB_NONE:
1902 sc->mps_debug = flags;
1903 break;
1904 case COMB_ADD:
1905 sc->mps_debug |= flags;
1906 break;
1907 case COMB_SUB:
1908 sc->mps_debug &= (~flags);
1909 break;
1910 }
1911
1912 return;
1913 }
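
/*
 * Worked examples of inputs accepted by mps_parse_debug() above, with
 * tokens separated by ',' or ':':
 *
 *   "info,error"  - replace the current mask with MPS_INFO|MPS_ERROR
 *   "+trace"      - add MPS_TRACE to the current mask
 *   "-xinfo"      - remove MPS_XINFO from the current mask
 *   "0x11"        - numeric tokens are parsed with strtol() and OR'd in
 */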
1914
1915 struct mps_dumpreq_hdr {
1916 uint32_t smid;
1917 uint32_t state;
1918 uint32_t numframes;
1919 uint32_t deschi;
1920 uint32_t desclo;
1921 };
1922
1923 static int
1924 mps_dump_reqs(SYSCTL_HANDLER_ARGS)
1925 {
1926 struct mps_softc *sc;
1927 struct mps_chain *chain, *chain1;
1928 struct mps_command *cm;
1929 struct mps_dumpreq_hdr hdr;
1930 struct sbuf *sb;
1931 uint32_t smid, state;
1932 int i, numreqs, error = 0;
1933
1934 sc = (struct mps_softc *)arg1;
1935
1936 if ((error = priv_check(curthread, PRIV_DRIVER)) != 0) {
1937 printf("priv check error %d\n", error);
1938 return (error);
1939 }
1940
1941 state = MPS_CM_STATE_INQUEUE;
1942 smid = 1;
1943 numreqs = sc->num_reqs;
1944
1945 if (req->newptr != NULL)
1946 return (EINVAL);
1947
1948 if (smid == 0 || smid > sc->num_reqs)
1949 return (EINVAL);
1950 if (numreqs <= 0 || (numreqs + smid > sc->num_reqs))
1951 numreqs = sc->num_reqs;
1952 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
1953
1954 /* Best effort, no locking */
1955 for (i = smid; i < numreqs; i++) {
1956 cm = &sc->commands[i];
1957 if ((sc->dump_reqs_alltypes == 0) && (cm->cm_state != state))
1958 continue;
1959 hdr.smid = i;
1960 hdr.state = cm->cm_state;
1961 hdr.numframes = 1;
1962 hdr.deschi = cm->cm_desc.Words.High;
1963 hdr.desclo = cm->cm_desc.Words.Low;
1964 TAILQ_FOREACH_SAFE(chain, &cm->cm_chain_list, chain_link,
1965 chain1)
1966 hdr.numframes++;
1967 sbuf_bcat(sb, &hdr, sizeof(hdr));
1968 sbuf_bcat(sb, cm->cm_req, 128);
1969 TAILQ_FOREACH_SAFE(chain, &cm->cm_chain_list, chain_link,
1970 chain1)
1971 sbuf_bcat(sb, chain->chain, 128);
1972 }
1973
1974 error = sbuf_finish(sb);
1975 sbuf_delete(sb);
1976 return (error);
1977 }
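
/*
 * Illustrative consumer sketch (assumed, not part of the driver): the
 * "dump_reqs" sysctl emits a stream of records, each a struct
 * mps_dumpreq_hdr followed by a 128-byte request frame and
 * (hdr.numframes - 1) 128-byte chain frames:
 *
 *   struct mps_dumpreq_hdr hdr;
 *   char frame[128];
 *
 *   while (read(fd, &hdr, sizeof(hdr)) == sizeof(hdr)) {
 *           for (uint32_t n = 0; n < hdr.numframes; n++)
 *                   read(fd, frame, sizeof(frame));
 *   }
 */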
1978
1979 int
1980 mps_attach(struct mps_softc *sc)
1981 {
1982 int error;
1983
1984 MPS_FUNCTRACE(sc);
1985 mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);
1986
1987 mtx_init(&sc->mps_mtx, "MPT2SAS lock", NULL, MTX_DEF);
1988 callout_init_mtx(&sc->periodic, &sc->mps_mtx, 0);
1989 callout_init_mtx(&sc->device_check_callout, &sc->mps_mtx, 0);
1990 TAILQ_INIT(&sc->event_list);
1991 timevalclear(&sc->lastfail);
1992
1993 if ((error = mps_transition_ready(sc)) != 0) {
1994 mps_dprint(sc, MPS_INIT|MPS_FAULT, "failed to transition "
1995 "ready\n");
1996 return (error);
1997 }
1998
1999 sc->facts = malloc(sizeof(MPI2_IOC_FACTS_REPLY), M_MPT2,
2000 M_ZERO|M_NOWAIT);
2001 if (sc->facts == NULL) {
2002 mps_dprint(sc, MPS_INIT|MPS_FAULT, "Cannot allocate memory, "
2003 "exit\n");
2004 return (ENOMEM);
2005 }
2006
2007 /*
2008 * Get IOC Facts and allocate all structures based on this information.
2009 * A Diag Reset will also call mps_iocfacts_allocate and re-read the IOC
2010 * Facts. If relevant values have changed in IOC Facts, this function
2011 * will free all of the memory based on IOC Facts and reallocate that
2012 * memory. If this fails, any allocated memory should already be freed.
2013 */
2014 if ((error = mps_iocfacts_allocate(sc, TRUE)) != 0) {
2015 mps_dprint(sc, MPS_INIT|MPS_FAULT, "IOC Facts based allocation "
2016 "failed with error %d, exit\n", error);
2017 return (error);
2018 }
2019
2020 /* Start the periodic watchdog check on the IOC Doorbell */
2021 mps_periodic(sc);
2022
2023 /*
2024 * The portenable will kick off discovery events that will drive the
2025 * rest of the initialization process. The CAM/SAS module will
2026 * hold up the boot sequence until discovery is complete.
2027 */
2028 sc->mps_ich.ich_func = mps_startup;
2029 sc->mps_ich.ich_arg = sc;
2030 if (config_intrhook_establish(&sc->mps_ich) != 0) {
2031 mps_dprint(sc, MPS_INIT|MPS_ERROR,
2032 "Cannot establish MPS config hook\n");
2033 error = EINVAL;
2034 }
2035
2036 /*
2037 * Allow IR to shutdown gracefully when shutdown occurs.
2038 */
2039 sc->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final,
2040 mpssas_ir_shutdown, sc, SHUTDOWN_PRI_DEFAULT);
2041
2042 if (sc->shutdown_eh == NULL)
2043 mps_dprint(sc, MPS_INIT|MPS_ERROR,
2044 "shutdown event registration failed\n");
2045
2046 mps_setup_sysctl(sc);
2047
2048 sc->mps_flags |= MPS_FLAGS_ATTACH_DONE;
2049 mps_dprint(sc, MPS_INIT, "%s exit error= %d\n", __func__, error);
2050
2051 return (error);
2052 }
2053
2054 /* Run through any late-start handlers. */
2055 static void
2056 mps_startup(void *arg)
2057 {
2058 struct mps_softc *sc;
2059
2060 sc = (struct mps_softc *)arg;
2061 mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);
2062
2063 mps_lock(sc);
2064 mps_unmask_intr(sc);
2065
2066 /* initialize device mapping tables */
2067 mps_base_static_config_pages(sc);
2068 mps_mapping_initialize(sc);
2069 mpssas_startup(sc);
2070 mps_unlock(sc);
2071
2072 mps_dprint(sc, MPS_INIT, "disestablish config intrhook\n");
2073 config_intrhook_disestablish(&sc->mps_ich);
2074 sc->mps_ich.ich_arg = NULL;
2075
2076 mps_dprint(sc, MPS_INIT, "%s exit\n", __func__);
2077 }
2078
2079 /* Periodic watchdog. Is called with the driver lock already held. */
2080 static void
2081 mps_periodic(void *arg)
2082 {
2083 struct mps_softc *sc;
2084 uint32_t db;
2085
2086 sc = (struct mps_softc *)arg;
2087 if (sc->mps_flags & MPS_FLAGS_SHUTDOWN)
2088 return;
2089
2090 db = mps_regread(sc, MPI2_DOORBELL_OFFSET);
2091 if ((db & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
2092 mps_dprint(sc, MPS_FAULT, "IOC Fault 0x%08x, Resetting\n", db);
2093 mps_reinit(sc);
2094 }
2095
2096 callout_reset_sbt(&sc->periodic, MPS_PERIODIC_DELAY * SBT_1S, 0,
2097 mps_periodic, sc, C_PREL(1));
2098 }
2099
2100 static void
2101 mps_log_evt_handler(struct mps_softc *sc, uintptr_t data,
2102 MPI2_EVENT_NOTIFICATION_REPLY *event)
2103 {
2104 MPI2_EVENT_DATA_LOG_ENTRY_ADDED *entry;
2105
2106 MPS_DPRINT_EVENT(sc, generic, event);
2107
2108 switch (event->Event) {
2109 case MPI2_EVENT_LOG_DATA:
2110 mps_dprint(sc, MPS_EVENT, "MPI2_EVENT_LOG_DATA:\n");
2111 if (sc->mps_debug & MPS_EVENT)
2112 hexdump(event->EventData, event->EventDataLength, NULL, 0);
2113 break;
2114 case MPI2_EVENT_LOG_ENTRY_ADDED:
2115 entry = (MPI2_EVENT_DATA_LOG_ENTRY_ADDED *)event->EventData;
2116 mps_dprint(sc, MPS_EVENT, "MPI2_EVENT_LOG_ENTRY_ADDED event "
2117 "0x%x Sequence %d:\n", entry->LogEntryQualifier,
2118 entry->LogSequence);
2119 break;
2120 default:
2121 break;
2122 }
2123 return;
2124 }
2125
2126 static int
2127 mps_attach_log(struct mps_softc *sc)
2128 {
2129 u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
2130
2131 bzero(events, sizeof(events));
2132 setbit(events, MPI2_EVENT_LOG_DATA);
2133 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
2134
2135 mps_register_events(sc, events, mps_log_evt_handler, NULL,
2136 &sc->mps_log_eh);
2137
2138 return (0);
2139 }
2140
2141 static int
2142 mps_detach_log(struct mps_softc *sc)
2143 {
2144
2145 if (sc->mps_log_eh != NULL)
2146 mps_deregister_events(sc, sc->mps_log_eh);
2147 return (0);
2148 }
2149
2150 /*
2151 * Free all of the driver resources and detach submodules. Should be called
2152 * without the lock held.
2153 */
2154 int
2155 mps_free(struct mps_softc *sc)
2156 {
2157 int error;
2158
2159 mps_dprint(sc, MPS_INIT, "%s entered\n", __func__);
2160 /* Turn off the watchdog */
2161 mps_lock(sc);
2162 sc->mps_flags |= MPS_FLAGS_SHUTDOWN;
2163 mps_unlock(sc);
2164 /* Lock must not be held for this */
2165 callout_drain(&sc->periodic);
2166 callout_drain(&sc->device_check_callout);
2167
2168 if (((error = mps_detach_log(sc)) != 0) ||
2169 ((error = mps_detach_sas(sc)) != 0)) {
2170 mps_dprint(sc, MPS_INIT|MPS_FAULT, "failed to detach "
2171 "subsystems, exit\n");
2172 return (error);
2173 }
2174
2175 mps_detach_user(sc);
2176
2177 /* Put the IOC back in the READY state. */
2178 mps_lock(sc);
2179 if ((error = mps_transition_ready(sc)) != 0) {
2180 mps_unlock(sc);
2181 return (error);
2182 }
2183 mps_unlock(sc);
2184
2185 if (sc->facts != NULL)
2186 free(sc->facts, M_MPT2);
2187
2188 /*
2189 * Free all buffers that are based on IOC Facts. A Diag Reset may need
2190 * to free these buffers too.
2191 */
2192 mps_iocfacts_free(sc);
2193
2194 if (sc->sysctl_tree != NULL)
2195 sysctl_ctx_free(&sc->sysctl_ctx);
2196
2197 /* Deregister the shutdown function */
2198 if (sc->shutdown_eh != NULL)
2199 EVENTHANDLER_DEREGISTER(shutdown_final, sc->shutdown_eh);
2200
2201 mtx_destroy(&sc->mps_mtx);
2202 mps_dprint(sc, MPS_INIT, "%s exit\n", __func__);
2203
2204 return (0);
2205 }
2206
2207 static __inline void
2208 mps_complete_command(struct mps_softc *sc, struct mps_command *cm)
2209 {
2210 MPS_FUNCTRACE(sc);
2211
2212 if (cm == NULL) {
2213 mps_dprint(sc, MPS_ERROR, "Completing NULL command\n");
2214 return;
2215 }
2216
2217 KASSERT(cm->cm_state == MPS_CM_STATE_INQUEUE,
2218 ("command not inqueue, state = %u\n", cm->cm_state));
2219 cm->cm_state = MPS_CM_STATE_BUSY;
2220 if (cm->cm_flags & MPS_CM_FLAGS_POLLED)
2221 cm->cm_flags |= MPS_CM_FLAGS_COMPLETE;
2222
2223 if (cm->cm_complete != NULL) {
2224 mps_dprint(sc, MPS_TRACE,
2225 "%s cm %p calling cm_complete %p data %p reply %p\n",
2226 __func__, cm, cm->cm_complete, cm->cm_complete_data,
2227 cm->cm_reply);
2228 cm->cm_complete(sc, cm);
2229 }
2230
2231 if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) {
2232 mps_dprint(sc, MPS_TRACE, "waking up %p\n", cm);
2233 wakeup(cm);
2234 }
2235
2236 if (cm->cm_sc->io_cmds_active != 0) {
2237 cm->cm_sc->io_cmds_active--;
2238 } else {
2239 mps_dprint(sc, MPS_ERROR, "Warning: io_cmds_active is "
2240 "out of sync - resynching to 0\n");
2241 }
2242 }
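
/*
 * Illustrative shape of a cm_complete handler (a sketch, not from this
 * file); it is invoked above with the driver lock held, and a handler
 * typically releases the command when it is done with the reply:
 *
 *   static void
 *   example_complete(struct mps_softc *sc, struct mps_command *cm)
 *   {
 *           if (cm->cm_reply != NULL)
 *                   example_handle_reply(sc, cm->cm_reply);
 *           mps_free_command(sc, cm);
 *   }
 *
 * example_handle_reply() is a hypothetical helper.
 */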
2243
2244 static void
2245 mps_sas_log_info(struct mps_softc *sc, u32 log_info)
2246 {
2247 union loginfo_type {
2248 u32 loginfo;
2249 struct {
2250 u32 subcode:16;
2251 u32 code:8;
2252 u32 originator:4;
2253 u32 bus_type:4;
2254 } dw;
2255 };
2256 union loginfo_type sas_loginfo;
2257 char *originator_str = NULL;
2258
2259 sas_loginfo.loginfo = log_info;
2260 if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
2261 return;
2262
2263 /* each nexus loss loginfo */
2264 if (log_info == 0x31170000)
2265 return;
2266
2267 /* eat the loginfos associated with task aborts */
2268 if ((log_info == 0x30050000 || log_info ==
2269 0x31140000 || log_info == 0x31130000))
2270 return;
2271
2272 switch (sas_loginfo.dw.originator) {
2273 case 0:
2274 originator_str = "IOP";
2275 break;
2276 case 1:
2277 originator_str = "PL";
2278 break;
2279 case 2:
2280 originator_str = "IR";
2281 break;
2282 }
2283
2284 mps_dprint(sc, MPS_LOG, "log_info(0x%08x): originator(%s), "
2285 "code(0x%02x), sub_code(0x%04x)\n", log_info,
2286 originator_str, sas_loginfo.dw.code,
2287 sas_loginfo.dw.subcode);
2288 }
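
/*
 * Worked example of the decode above: log_info 0x31170000 splits into
 * bus_type 0x3 (SAS), originator 0x1 (PL), code 0x17, subcode 0x0000,
 * which is why that value (a nexus loss) is filtered out before the
 * originator switch is reached.
 */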
2289
2290 static void
2291 mps_display_reply_info(struct mps_softc *sc, uint8_t *reply)
2292 {
2293 MPI2DefaultReply_t *mpi_reply;
2294 u16 sc_status;
2295
2296 mpi_reply = (MPI2DefaultReply_t*)reply;
2297 sc_status = le16toh(mpi_reply->IOCStatus);
2298 if (sc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
2299 mps_sas_log_info(sc, le32toh(mpi_reply->IOCLogInfo));
2300 }
2301 void
2302 mps_intr(void *data)
2303 {
2304 struct mps_softc *sc;
2305 uint32_t status;
2306
2307 sc = (struct mps_softc *)data;
2308 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
2309
2310 /*
2311 * Check interrupt status register to flush the bus. This is
2312 * needed for both INTx interrupts and driver-driven polling
2313 */
2314 status = mps_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET);
2315 if ((status & MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT) == 0)
2316 return;
2317
2318 mps_lock(sc);
2319 mps_intr_locked(data);
2320 mps_unlock(sc);
2321 return;
2322 }
2323
2324 /*
2325 * In theory, MSI/MSIX interrupts shouldn't need to read any registers on the
2326 * chip. Hopefully this theory is correct.
2327 */
2328 void
2329 mps_intr_msi(void *data)
2330 {
2331 struct mps_softc *sc;
2332
2333 sc = (struct mps_softc *)data;
2334 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
2335 mps_lock(sc);
2336 mps_intr_locked(data);
2337 mps_unlock(sc);
2338 return;
2339 }
2340
2341 /*
2342 * The locking is overly broad and simplistic, but easy to deal with for now.
2343 */
2344 void
2345 mps_intr_locked(void *data)
2346 {
2347 MPI2_REPLY_DESCRIPTORS_UNION *desc;
2348 MPI2_DIAG_RELEASE_REPLY *rel_rep;
2349 mps_fw_diagnostic_buffer_t *pBuffer;
2350 struct mps_softc *sc;
2351 struct mps_command *cm = NULL;
2352 uint64_t tdesc;
2353 uint8_t flags;
2354 u_int pq;
2355
2356 sc = (struct mps_softc *)data;
2357
2358 pq = sc->replypostindex;
2359 mps_dprint(sc, MPS_TRACE,
2360 "%s sc %p starting with replypostindex %u\n",
2361 __func__, sc, sc->replypostindex);
2362
2363 for ( ;; ) {
2364 cm = NULL;
2365 desc = &sc->post_queue[sc->replypostindex];
2366
2367 /*
2368 * Copy and clear out the descriptor so that any reentry will
2369 * immediately know that this descriptor has already been
2370 * looked at. There is unfortunate casting magic because the
2371 * MPI API doesn't have a cardinal 64bit type.
2372 */
2373 tdesc = 0xffffffffffffffff;
2374 tdesc = atomic_swap_64((uint64_t *)desc, tdesc);
2375 desc = (MPI2_REPLY_DESCRIPTORS_UNION *)&tdesc;
2376
2377 flags = desc->Default.ReplyFlags &
2378 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2379 if ((flags == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
2380 || (le32toh(desc->Words.High) == 0xffffffff))
2381 break;
2382
2383 /* increment the replypostindex now, so that event handlers
2384 * and cm completion handlers which decide to do a diag
2385 * reset can zero it without it getting incremented again
2386 * afterwards, and we break out of this loop on the next
2387 * iteration since the reply post queue has been cleared to
2388 * 0xFF and all descriptors look unused (which they are).
2389 */
2390 if (++sc->replypostindex >= sc->pqdepth)
2391 sc->replypostindex = 0;
2392
2393 switch (flags) {
2394 case MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS:
2395 cm = &sc->commands[le16toh(desc->SCSIIOSuccess.SMID)];
2396 cm->cm_reply = NULL;
2397 break;
2398 case MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY:
2399 {
2400 uint32_t baddr;
2401 uint8_t *reply;
2402
2403 /*
2404 * Re-compose the reply address from the address
2405 * sent back from the chip. The ReplyFrameAddress
2406 * is the lower 32 bits of the physical address of
2407 * the particular reply frame. Convert that address to
2408 * host format, and then use that to provide the
2409 * offset against the virtual address base
2410 * (sc->reply_frames).
2411 */
2412 baddr = le32toh(desc->AddressReply.ReplyFrameAddress);
2413 reply = sc->reply_frames +
2414 (baddr - ((uint32_t)sc->reply_busaddr));
2415 /*
2416 * Make sure the reply we got back is in a valid
2417 * range. If not, go ahead and panic here, since
2418 * we'll probably panic as soon as we dereference the
2419 * reply pointer anyway.
2420 */
2421 if ((reply < sc->reply_frames)
2422 || (reply > (sc->reply_frames +
2423 (sc->fqdepth * sc->replyframesz)))) {
2424 printf("%s: WARNING: reply %p out of range!\n",
2425 __func__, reply);
2426 printf("%s: reply_frames %p, fqdepth %d, "
2427 "frame size %d\n", __func__,
2428 sc->reply_frames, sc->fqdepth,
2429 sc->replyframesz);
2430 printf("%s: baddr %#x,\n", __func__, baddr);
2431 /* LSI-TODO. See Linux Code for Graceful exit */
2432 panic("Reply address out of range");
2433 }
2434 if (le16toh(desc->AddressReply.SMID) == 0) {
2435 if (((MPI2_DEFAULT_REPLY *)reply)->Function ==
2436 MPI2_FUNCTION_DIAG_BUFFER_POST) {
2437 /*
2438 * If SMID is 0 for Diag Buffer Post,
2439 * this implies that the reply is due to
2440 * a release function with a status that
2441 * the buffer has been released. Set
2442 * the buffer flags accordingly.
2443 */
2444 rel_rep =
2445 (MPI2_DIAG_RELEASE_REPLY *)reply;
2446 if ((le16toh(rel_rep->IOCStatus) &
2447 MPI2_IOCSTATUS_MASK) ==
2448 MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED)
2449 {
2450 pBuffer =
2451 &sc->fw_diag_buffer_list[
2452 rel_rep->BufferType];
2453 pBuffer->valid_data = TRUE;
2454 pBuffer->owned_by_firmware =
2455 FALSE;
2456 pBuffer->immediate = FALSE;
2457 }
2458 } else
2459 mps_dispatch_event(sc, baddr,
2460 (MPI2_EVENT_NOTIFICATION_REPLY *)
2461 reply);
2462 } else {
2463 /*
2464 * Ignore commands not in INQUEUE state
2465 * since they've already been completed
2466 * via another path.
2467 */
2468 cm = &sc->commands[
2469 le16toh(desc->AddressReply.SMID)];
2470 if (cm->cm_state == MPS_CM_STATE_INQUEUE) {
2471 cm->cm_reply = reply;
2472 cm->cm_reply_data = le32toh(
2473 desc->AddressReply.ReplyFrameAddress);
2474 } else {
2475 mps_dprint(sc, MPS_RECOVERY,
2476 "Bad state for ADDRESS_REPLY status,"
2477 " ignoring state %d cm %p\n",
2478 cm->cm_state, cm);
2479 }
2480 }
2481 break;
2482 }
2483 case MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS:
2484 case MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER:
2485 case MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS:
2486 default:
2487 /* Unhandled */
2488 mps_dprint(sc, MPS_ERROR, "Unhandled reply 0x%x\n",
2489 desc->Default.ReplyFlags);
2490 cm = NULL;
2491 break;
2492 }
2493
2494
2495 if (cm != NULL) {
2496 /* Print error reply frame */
2497 if (cm->cm_reply)
2498 mps_display_reply_info(sc,cm->cm_reply);
2499 mps_complete_command(sc, cm);
2500 }
2501 }
2502
2503 if (pq != sc->replypostindex) {
2504 mps_dprint(sc, MPS_TRACE, "%s sc %p writing postindex %d\n",
2505 __func__, sc, sc->replypostindex);
2506 mps_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET,
2507 sc->replypostindex);
2508 }
2509
2510 return;
2511 }
2512
2513 static void
2514 mps_dispatch_event(struct mps_softc *sc, uintptr_t data,
2515 MPI2_EVENT_NOTIFICATION_REPLY *reply)
2516 {
2517 struct mps_event_handle *eh;
2518 int event, handled = 0;
2519
2520 event = le16toh(reply->Event);
2521 TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
2522 if (isset(eh->mask, event)) {
2523 eh->callback(sc, data, reply);
2524 handled++;
2525 }
2526 }
2527
2528 if (handled == 0)
2529 mps_dprint(sc, MPS_EVENT, "Unhandled event 0x%x\n", event);
2530
2531 /*
2532 * This is the only place that the event/reply should be freed.
2533 * Anything wanting to hold onto the event data should have
2534 * already copied it into their own storage.
2535 */
2536 mps_free_reply(sc, data);
2537 }
2538
2539 static void
2540 mps_reregister_events_complete(struct mps_softc *sc, struct mps_command *cm)
2541 {
2542 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
2543
2544 if (cm->cm_reply)
2545 MPS_DPRINT_EVENT(sc, generic,
2546 (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply);
2547
2548 mps_free_command(sc, cm);
2549
2550 /* next, send a port enable */
2551 mpssas_startup(sc);
2552 }
2553
2554 /*
2555 * For both register_events and update_events, the caller supplies a bitmap
2556 * of events that it _wants_. These functions then turn that into a bitmask
2557 * suitable for the controller.
2558 */
2559 int
2560 mps_register_events(struct mps_softc *sc, u32 *mask,
2561 mps_evt_callback_t *cb, void *data, struct mps_event_handle **handle)
2562 {
2563 struct mps_event_handle *eh;
2564 int error = 0;
2565
2566 eh = malloc(sizeof(struct mps_event_handle), M_MPT2, M_WAITOK|M_ZERO);
2567 eh->callback = cb;
2568 eh->data = data;
2569 TAILQ_INSERT_TAIL(&sc->event_list, eh, eh_list);
2570 if (mask != NULL)
2571 error = mps_update_events(sc, eh, mask);
2572 *handle = eh;
2573
2574 return (error);
2575 }
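
/*
 * For an in-tree caller of this interface, see mps_attach_log() above. A
 * minimal sketch (my_callback and handle are placeholders):
 *
 *   u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
 *
 *   bzero(events, sizeof(events));
 *   setbit(events, MPI2_EVENT_LOG_DATA);
 *   error = mps_register_events(sc, events, my_callback, NULL, &handle);
 */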
2576
2577 int
2578 mps_update_events(struct mps_softc *sc, struct mps_event_handle *handle,
2579 u32 *mask)
2580 {
2581 MPI2_EVENT_NOTIFICATION_REQUEST *evtreq;
2582 MPI2_EVENT_NOTIFICATION_REPLY *reply = NULL;
2583 struct mps_command *cm;
2584 int error, i;
2585
2586 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
2587
2588 if ((mask != NULL) && (handle != NULL))
2589 bcopy(mask, &handle->mask[0], sizeof(u32) *
2590 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS);
2591
2592 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
2593 sc->event_mask[i] = -1;
2594
2595 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
2596 sc->event_mask[i] &= ~handle->mask[i];
2597
2598 if ((cm = mps_alloc_command(sc)) == NULL)
2599 return (EBUSY);
2600 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req;
2601 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
2602 evtreq->MsgFlags = 0;
2603 evtreq->SASBroadcastPrimitiveMasks = 0;
2604 #ifdef MPS_DEBUG_ALL_EVENTS
2605 {
2606 u_char fullmask[16];
2607 memset(fullmask, 0x00, 16);
2608 bcopy(fullmask, &evtreq->EventMasks[0], sizeof(u32) *
2609 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS);
2610 }
2611 #else
2612 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
2613 evtreq->EventMasks[i] =
2614 htole32(sc->event_mask[i]);
2615 #endif
2616 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2617 cm->cm_data = NULL;
2618
2619 error = mps_wait_command(sc, &cm, 60, 0);
2620 if (cm != NULL)
2621 reply = (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply;
2622 if ((reply == NULL) ||
2623 (reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
2624 error = ENXIO;
2625
2626 if (reply)
2627 MPS_DPRINT_EVENT(sc, generic, reply);
2628
2629 mps_dprint(sc, MPS_TRACE, "%s finished error %d\n", __func__, error);
2630
2631 if (cm != NULL)
2632 mps_free_command(sc, cm);
2633 return (error);
2634 }
2635
2636 static int
2637 mps_reregister_events(struct mps_softc *sc)
2638 {
2639 MPI2_EVENT_NOTIFICATION_REQUEST *evtreq;
2640 struct mps_command *cm;
2641 struct mps_event_handle *eh;
2642 int error, i;
2643
2644 mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
2645
2646 /* first, reregister events */
2647
2648 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
2649 sc->event_mask[i] = -1;
2650
2651 TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
2652 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
2653 sc->event_mask[i] &= ~eh->mask[i];
2654 }
2655
2656 if ((cm = mps_alloc_command(sc)) == NULL)
2657 return (EBUSY);
2658 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req;
2659 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
2660 evtreq->MsgFlags = 0;
2661 evtreq->SASBroadcastPrimitiveMasks = 0;
2662 #ifdef MPS_DEBUG_ALL_EVENTS
2663 {
2664 u_char fullmask[16];
2665 memset(fullmask, 0x00, 16);
2666 bcopy(fullmask, &evtreq->EventMasks[0], sizeof(u32) *
2667 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS);
2668 }
2669 #else
2670 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
2671 evtreq->EventMasks[i] =
2672 htole32(sc->event_mask[i]);
2673 #endif
2674 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2675 cm->cm_data = NULL;
2676 cm->cm_complete = mps_reregister_events_complete;
2677
2678 error = mps_map_command(sc, cm);
2679
2680 mps_dprint(sc, MPS_TRACE, "%s finished with error %d\n", __func__,
2681 error);
2682 return (error);
2683 }
2684
2685 void
2686 mps_deregister_events(struct mps_softc *sc, struct mps_event_handle *handle)
2687 {
2688
2689 TAILQ_REMOVE(&sc->event_list, handle, eh_list);
2690 free(handle, M_MPT2);
2691 }
2692
2693 /*
2694 * Add a chain element as the next SGE for the specified command.
2695 * Reset cm_sge and cm_sgesize to indicate all the available space.
2696 */
2697 static int
2698 mps_add_chain(struct mps_command *cm)
2699 {
2700 MPI2_SGE_CHAIN64 *sgc;
2701 struct mps_chain *chain;
2702 u_int space;
2703
2704 if (cm->cm_sglsize < MPS_SGC_SIZE)
2705 panic("MPS: Need SGE Error Code\n");
2706
2707 chain = mps_alloc_chain(cm->cm_sc);
2708 if (chain == NULL)
2709 return (ENOBUFS);
2710
2711 space = cm->cm_sc->reqframesz;
2712
2713 /*
2714 * Note: a double-linked list is used to make it easier to
2715 * walk for debugging.
2716 */
2717 TAILQ_INSERT_TAIL(&cm->cm_chain_list, chain, chain_link);
2718
2719 sgc = (MPI2_SGE_CHAIN64 *)&cm->cm_sge->MpiChain;
2720 sgc->Length = htole16(space);
2721 sgc->NextChainOffset = 0;
2722 /* TODO: it looks like sgc->Flags should have been set as
2723 * sgc->Flags = (MPI2_SGE_FLAGS_CHAIN_ELEMENT | MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
2724 * MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT,
2725 * but the code below is fine because this is not a simple element: a
2726 * MPI2_SGE_CHAIN64 has separate Length and Flags fields.
2727 */
2728 sgc->Flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT | MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
2729 sgc->Address.High = htole32(chain->chain_busaddr >> 32);
2730 sgc->Address.Low = htole32(chain->chain_busaddr);
2731
2732 cm->cm_sge = (MPI2_SGE_IO_UNION *)&chain->chain->MpiSimple;
2733 cm->cm_sglsize = space;
2734 return (0);
2735 }
2736
2737 /*
2738 * Add one scatter-gather element (chain, simple, transaction context)
2739 * to the scatter-gather list for a command. Maintain cm_sglsize and
2740 * cm_sge as the remaining size and pointer to the next SGE to fill
2741 * in, respectively.
2742 */
2743 int
2744 mps_push_sge(struct mps_command *cm, void *sgep, size_t len, int segsleft)
2745 {
2746 MPI2_SGE_TRANSACTION_UNION *tc = sgep;
2747 MPI2_SGE_SIMPLE64 *sge = sgep;
2748 int error, type;
2749 uint32_t saved_buf_len, saved_address_low, saved_address_high;
2750
2751 type = (tc->Flags & MPI2_SGE_FLAGS_ELEMENT_MASK);
2752
2753 #ifdef INVARIANTS
2754 switch (type) {
2755 case MPI2_SGE_FLAGS_TRANSACTION_ELEMENT: {
2756 if (len != tc->DetailsLength + 4)
2757 panic("TC %p length %u or %zu?", tc,
2758 tc->DetailsLength + 4, len);
2759 }
2760 break;
2761 case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
2762 /* Driver only uses 64-bit chain elements */
2763 if (len != MPS_SGC_SIZE)
2764 panic("CHAIN %p length %u or %zu?", sgep,
2765 MPS_SGC_SIZE, len);
2766 break;
2767 case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
2768 /* Driver only uses 64-bit SGE simple elements */
2769 if (len != MPS_SGE64_SIZE)
2770 panic("SGE simple %p length %u or %zu?", sge,
2771 MPS_SGE64_SIZE, len);
2772 if (((le32toh(sge->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT) &
2773 MPI2_SGE_FLAGS_ADDRESS_SIZE) == 0)
2774 panic("SGE simple %p not marked 64-bit?", sge);
2775
2776 break;
2777 default:
2778 panic("Unexpected SGE %p, flags %02x", tc, tc->Flags);
2779 }
2780 #endif
2781
2782 /*
2783 * case 1: 1 more segment, enough room for it
2784 * case 2: 2 more segments, enough room for both
2785 * case 3: >=2 more segments, only enough room for 1 and a chain
2786 * case 4: >=1 more segment, enough room for only a chain
2787 * case 5: >=1 more segment, no room for anything (error)
2788 */
2789
2790 /*
2791 * There should be room for at least a chain element, or this
2792 * code is buggy. Case (5).
2793 */
2794 if (cm->cm_sglsize < MPS_SGC_SIZE)
2795 panic("MPS: Need SGE Error Code\n");
2796
2797 if (segsleft >= 1 && cm->cm_sglsize < len + MPS_SGC_SIZE) {
2798 /*
2799 * 1 or more segment, enough room for only a chain.
2800 * Hope the previous element wasn't a Simple entry
2801 * that needed to be marked with
2802 * MPI2_SGE_FLAGS_LAST_ELEMENT. Case (4).
2803 */
2804 if ((error = mps_add_chain(cm)) != 0)
2805 return (error);
2806 }
2807
2808 if (segsleft >= 2 &&
2809 cm->cm_sglsize < len + MPS_SGC_SIZE + MPS_SGE64_SIZE) {
2810 /*
2811 * There are 2 or more segments left to add, and only
2812 * enough room for 1 and a chain. Case (3).
2813 *
2814 * Mark as last element in this chain if necessary.
2815 */
2816 if (type == MPI2_SGE_FLAGS_SIMPLE_ELEMENT) {
2817 sge->FlagsLength |= htole32(
2818 MPI2_SGE_FLAGS_LAST_ELEMENT << MPI2_SGE_FLAGS_SHIFT);
2819 }
2820
2821 /*
2822 * Add the item then a chain. Do the chain now,
2823 * rather than on the next iteration, to simplify
2824 * understanding the code.
2825 */
2826 cm->cm_sglsize -= len;
2827 bcopy(sgep, cm->cm_sge, len);
2828 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len);
2829 return (mps_add_chain(cm));
2830 }
2831
2832 #ifdef INVARIANTS
2833 /* Case 1: 1 more segment, enough room for it. */
2834 if (segsleft == 1 && cm->cm_sglsize < len)
2835 panic("1 seg left and no room? %u versus %zu",
2836 cm->cm_sglsize, len);
2837
2838 /* Case 2: 2 more segments, enough room for both */
2839 if (segsleft == 2 && cm->cm_sglsize < len + MPS_SGE64_SIZE)
2840 panic("2 segs left and no room? %u versus %zu",
2841 cm->cm_sglsize, len);
2842 #endif
2843
2844 if (segsleft == 1 && type == MPI2_SGE_FLAGS_SIMPLE_ELEMENT) {
2845 /*
2846 * If this is a bi-directional request, need to account for that
2847 * here. Save the pre-filled sge values. These will be used
2848 * either for the 2nd SGL or for a single direction SGL. If
2849 * cm_out_len is non-zero, this is a bi-directional request, so
2850 * fill in the OUT SGL first, then the IN SGL, otherwise just
2851 * fill in the IN SGL. Note that at this time, when filling in
2852 * 2 SGL's for a bi-directional request, they both use the same
2853 * DMA buffer (same cm command).
2854 */
2855 saved_buf_len = le32toh(sge->FlagsLength) & 0x00FFFFFF;
2856 saved_address_low = sge->Address.Low;
2857 saved_address_high = sge->Address.High;
2858 if (cm->cm_out_len) {
2859 sge->FlagsLength = htole32(cm->cm_out_len |
2860 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2861 MPI2_SGE_FLAGS_END_OF_BUFFER |
2862 MPI2_SGE_FLAGS_HOST_TO_IOC |
2863 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
2864 MPI2_SGE_FLAGS_SHIFT));
2865 cm->cm_sglsize -= len;
2866 bcopy(sgep, cm->cm_sge, len);
2867 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge
2868 + len);
2869 }
2870 saved_buf_len |=
2871 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2872 MPI2_SGE_FLAGS_END_OF_BUFFER |
2873 MPI2_SGE_FLAGS_LAST_ELEMENT |
2874 MPI2_SGE_FLAGS_END_OF_LIST |
2875 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) <<
2876 MPI2_SGE_FLAGS_SHIFT);
2877 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN) {
2878 saved_buf_len |=
2879 ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) <<
2880 MPI2_SGE_FLAGS_SHIFT);
2881 } else {
2882 saved_buf_len |=
2883 ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) <<
2884 MPI2_SGE_FLAGS_SHIFT);
2885 }
2886 sge->FlagsLength = htole32(saved_buf_len);
2887 sge->Address.Low = saved_address_low;
2888 sge->Address.High = saved_address_high;
2889 }
2890
2891 cm->cm_sglsize -= len;
2892 bcopy(sgep, cm->cm_sge, len);
2893 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len);
2894 return (0);
2895 }
2896
2897 /*
2898 * Add one dma segment to the scatter-gather list for a command.
2899 */
2900 int
2901 mps_add_dmaseg(struct mps_command *cm, vm_paddr_t pa, size_t len, u_int flags,
2902 int segsleft)
2903 {
2904 MPI2_SGE_SIMPLE64 sge;
2905
2906 /*
2907 * This driver always uses 64-bit address elements for simplicity.
2908 */
2909 bzero(&sge, sizeof(sge));
2910 flags |= MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2911 MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
2912 sge.FlagsLength = htole32(len | (flags << MPI2_SGE_FLAGS_SHIFT));
2913 mps_from_u64(pa, &sge.Address);
2914
2915 return (mps_push_sge(cm, &sge, sizeof sge, segsleft));
2916 }
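
/*
 * Illustrative call (values assumed): a single 512-byte DATAIN segment
 * would be added with
 *
 *   mps_add_dmaseg(cm, segs[0].ds_addr, 512, MPI2_SGE_FLAGS_IOC_TO_HOST, 1);
 *
 * and, since segsleft is 1, mps_push_sge() marks the resulting simple
 * element as the last element / end of list.
 */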
2917
2918 static void
2919 mps_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2920 {
2921 struct mps_softc *sc;
2922 struct mps_command *cm;
2923 u_int i, dir, sflags;
2924
2925 cm = (struct mps_command *)arg;
2926 sc = cm->cm_sc;
2927
2928 /*
2929 * In this case, just print out a warning and let the chip tell the
2930 * user they did the wrong thing.
2931 */
2932 if ((cm->cm_max_segs != 0) && (nsegs > cm->cm_max_segs)) {
2933 mps_dprint(sc, MPS_ERROR,
2934 "%s: warning: busdma returned %d segments, "
2935 "more than the %d allowed\n", __func__, nsegs,
2936 cm->cm_max_segs);
2937 }
2938
2939 /*
2940 * Set up DMA direction flags. Bi-directional requests are also handled
2941 * here. In that case, both direction flags will be set.
2942 */
2943 sflags = 0;
2944 if (cm->cm_flags & MPS_CM_FLAGS_SMP_PASS) {
2945 /*
2946 * We have to add a special case for SMP passthrough, there
2947 * is no easy way to generically handle it. The first
2948 * S/G element is used for the command (therefore the
2949 * direction bit needs to be set). The second one is used
2950 * for the reply. We'll leave it to the caller to make
2951 * sure we only have two buffers.
2952 */
2953 /*
2954 * Even though the busdma man page says it doesn't make
2955 * sense to have both direction flags, it does in this case.
2956 * We have one s/g element being accessed in each direction.
2957 */
2958 dir = BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD;
2959
2960 /*
2961 * Set the direction flag on the first buffer in the SMP
2962 * passthrough request. We'll clear it for the second one.
2963 */
2964 sflags |= MPI2_SGE_FLAGS_DIRECTION |
2965 MPI2_SGE_FLAGS_END_OF_BUFFER;
2966 } else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT) {
2967 sflags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
2968 dir = BUS_DMASYNC_PREWRITE;
2969 } else
2970 dir = BUS_DMASYNC_PREREAD;
2971
2972 for (i = 0; i < nsegs; i++) {
2973 if ((cm->cm_flags & MPS_CM_FLAGS_SMP_PASS) && (i != 0)) {
2974 sflags &= ~MPI2_SGE_FLAGS_DIRECTION;
2975 }
2976 error = mps_add_dmaseg(cm, segs[i].ds_addr, segs[i].ds_len,
2977 sflags, nsegs - i);
2978 if (error != 0) {
2979 /* Resource shortage, roll back! */
2980 if (ratecheck(&sc->lastfail, &mps_chainfail_interval))
2981 mps_dprint(sc, MPS_INFO, "Out of chain frames, "
2982 "consider increasing hw.mps.max_chains.\n");
2983 cm->cm_flags |= MPS_CM_FLAGS_CHAIN_FAILED;
2984 /*
2985 * mps_complete_command can only be called on commands
2986 * that are in the queue. Since this is an error path
2987 * which gets called before we enqueue, update the state
2988 * to meet this requirement before we complete it.
2989 */
2990 cm->cm_state = MPS_CM_STATE_INQUEUE;
2991 mps_complete_command(sc, cm);
2992 return;
2993 }
2994 }
2995
2996 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2997 mps_enqueue_request(sc, cm);
2998
2999 return;
3000 }
3001
3002 static void
3003 mps_data_cb2(void *arg, bus_dma_segment_t *segs, int nsegs, bus_size_t mapsize,
3004 int error)
3005 {
3006 mps_data_cb(arg, segs, nsegs, error);
3007 }
3008
3009 /*
3010 * This is the routine to enqueue commands asynchronously.
3011 * Note that the only error path here is from bus_dmamap_load(), which can
3012 * return EINPROGRESS if it is waiting for resources. Other than this, it's
3013 * assumed that if you have a command in-hand, then you have enough credits
3014 * to use it.
3015 */
3016 int
3017 mps_map_command(struct mps_softc *sc, struct mps_command *cm)
3018 {
3019 int error = 0;
3020
3021 if (cm->cm_flags & MPS_CM_FLAGS_USE_UIO) {
3022 error = bus_dmamap_load_uio(sc->buffer_dmat, cm->cm_dmamap,
3023 &cm->cm_uio, mps_data_cb2, cm, 0);
3024 } else if (cm->cm_flags & MPS_CM_FLAGS_USE_CCB) {
3025 error = bus_dmamap_load_ccb(sc->buffer_dmat, cm->cm_dmamap,
3026 cm->cm_data, mps_data_cb, cm, 0);
3027 } else if ((cm->cm_data != NULL) && (cm->cm_length != 0)) {
3028 error = bus_dmamap_load(sc->buffer_dmat, cm->cm_dmamap,
3029 cm->cm_data, cm->cm_length, mps_data_cb, cm, 0);
3030 } else {
3031 /* Add a zero-length element as needed */
3032 if (cm->cm_sge != NULL)
3033 mps_add_dmaseg(cm, 0, 0, 0, 1);
3034 mps_enqueue_request(sc, cm);
3035 }
3036
3037 return (error);
3038 }
3039
3040 /*
3041 * This is the routine to enqueue commands synchronously. An error of
3042 * EINPROGRESS from mps_map_command() is ignored since the command will
3043 * be executed and enqueued automatically. Other errors come from msleep().
3044 */
3045 int
3046 mps_wait_command(struct mps_softc *sc, struct mps_command **cmp, int timeout,
3047 int sleep_flag)
3048 {
3049 int error, rc;
3050 struct timeval cur_time, start_time;
3051 struct mps_command *cm = *cmp;
3052
3053 if (sc->mps_flags & MPS_FLAGS_DIAGRESET)
3054 return EBUSY;
3055
3056 cm->cm_complete = NULL;
3057 cm->cm_flags |= MPS_CM_FLAGS_POLLED;
3058 error = mps_map_command(sc, cm);
3059 if ((error != 0) && (error != EINPROGRESS))
3060 return (error);
3061
3062 /*
3063 * Check for context and wait 50 ms at a time until the timeout has
3064 * expired or the command has finished. If msleep can't be used, need
3065 * to poll.
3066 */
3067 if (curthread->td_no_sleeping != 0)
3068 sleep_flag = NO_SLEEP;
3069 getmicrouptime(&start_time);
3070 if (mtx_owned(&sc->mps_mtx) && sleep_flag == CAN_SLEEP) {
3071 cm->cm_flags |= MPS_CM_FLAGS_WAKEUP;
3072 error = msleep(cm, &sc->mps_mtx, 0, "mpswait", timeout*hz);
3073 if (error == EWOULDBLOCK) {
3074 /*
3075 * Record the actual elapsed time in the case of a
3076 * timeout for the message below.
3077 */
3078 getmicrouptime(&cur_time);
3079 timevalsub(&cur_time, &start_time);
3080 }
3081 } else {
3082 while ((cm->cm_flags & MPS_CM_FLAGS_COMPLETE) == 0) {
3083 mps_intr_locked(sc);
3084 if (sleep_flag == CAN_SLEEP)
3085 pause("mpswait", hz/20);
3086 else
3087 DELAY(50000);
3088
3089 getmicrouptime(&cur_time);
3090 timevalsub(&cur_time, &start_time);
3091 if (cur_time.tv_sec > timeout) {
3092 error = EWOULDBLOCK;
3093 break;
3094 }
3095 }
3096 }
3097
3098 if (error == EWOULDBLOCK) {
3099 if (cm->cm_timeout_handler == NULL) {
3100 mps_dprint(sc, MPS_FAULT, "Calling Reinit from %s, timeout=%d,"
3101 " elapsed=%jd\n", __func__, timeout,
3102 (intmax_t)cur_time.tv_sec);
3103 rc = mps_reinit(sc);
3104 mps_dprint(sc, MPS_FAULT, "Reinit %s\n", (rc == 0) ? "success" :
3105 "failed");
3106 } else
3107 cm->cm_timeout_handler(sc, cm);
3108 if (sc->mps_flags & MPS_FLAGS_REALLOCATED) {
3109 /*
3110 * Tell the caller that we freed the command in a
3111 * reinit.
3112 */
3113 *cmp = NULL;
3114 }
3115 error = ETIMEDOUT;
3116 }
3117 return (error);
3118 }
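
/*
 * Typical synchronous call pattern (a sketch; see mps_update_events()
 * above for a real caller):
 *
 *   if ((cm = mps_alloc_command(sc)) == NULL)
 *           return (EBUSY);
 *   ... build the request in cm->cm_req ...
 *   error = mps_wait_command(sc, &cm, 60, CAN_SLEEP);
 *   if (cm != NULL)
 *           mps_free_command(sc, cm);
 *
 * The command is passed by reference because a diag reset during the wait
 * can free it, in which case *cmp is set to NULL above.
 */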
3119
3120 /*
3121 * The MPT driver had a verbose interface for config pages. In this driver,
3122 * reduce it to much simpler terms, similar to the Linux driver.
3123 */
3124 int
3125 mps_read_config_page(struct mps_softc *sc, struct mps_config_params *params)
3126 {
3127 MPI2_CONFIG_REQUEST *req;
3128 struct mps_command *cm;
3129 int error;
3130
3131 if (sc->mps_flags & MPS_FLAGS_BUSY) {
3132 return (EBUSY);
3133 }
3134
3135 cm = mps_alloc_command(sc);
3136 if (cm == NULL) {
3137 return (EBUSY);
3138 }
3139
3140 req = (MPI2_CONFIG_REQUEST *)cm->cm_req;
3141 req->Function = MPI2_FUNCTION_CONFIG;
3142 req->Action = params->action;
3143 req->SGLFlags = 0;
3144 req->ChainOffset = 0;
3145 req->PageAddress = params->page_address;
3146 if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) {
3147 MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr;
3148
3149 hdr = &params->hdr.Ext;
3150 req->ExtPageType = hdr->ExtPageType;
3151 req->ExtPageLength = hdr->ExtPageLength;
3152 req->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
3153 req->Header.PageLength = 0; /* Must be set to zero */
3154 req->Header.PageNumber = hdr->PageNumber;
3155 req->Header.PageVersion = hdr->PageVersion;
3156 } else {
3157 MPI2_CONFIG_PAGE_HEADER *hdr;
3158
3159 hdr = &params->hdr.Struct;
3160 req->Header.PageType = hdr->PageType;
3161 req->Header.PageNumber = hdr->PageNumber;
3162 req->Header.PageLength = hdr->PageLength;
3163 req->Header.PageVersion = hdr->PageVersion;
3164 }
3165
3166 cm->cm_data = params->buffer;
3167 cm->cm_length = params->length;
3168 if (cm->cm_data != NULL) {
3169 cm->cm_sge = &req->PageBufferSGE;
3170 cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
3171 cm->cm_flags = MPS_CM_FLAGS_SGE_SIMPLE | MPS_CM_FLAGS_DATAIN;
3172 } else
3173 cm->cm_sge = NULL;
3174 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3175
3176 cm->cm_complete_data = params;
3177 if (params->callback != NULL) {
3178 cm->cm_complete = mps_config_complete;
3179 return (mps_map_command(sc, cm));
3180 } else {
3181 error = mps_wait_command(sc, &cm, 0, CAN_SLEEP);
3182 if (error) {
3183 mps_dprint(sc, MPS_FAULT,
3184 "Error %d reading config page\n", error);
3185 if (cm != NULL)
3186 mps_free_command(sc, cm);
3187 return (error);
3188 }
3189 mps_config_complete(sc, cm);
3190 }
3191
3192 return (0);
3193 }
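
/*
 * Caller sketch (illustrative; the action and page type are only
 * examples):
 *
 *   struct mps_config_params params;
 *
 *   bzero(&params, sizeof(params));
 *   params.action = MPI2_CONFIG_ACTION_PAGE_HEADER;
 *   params.hdr.Struct.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
 *   params.hdr.Struct.PageNumber = 1;
 *   params.buffer = NULL;
 *   params.length = 0;
 *   params.callback = NULL;
 *   error = mps_read_config_page(sc, &params);
 *
 * With callback == NULL the call completes synchronously through
 * mps_wait_command(); otherwise mps_config_complete() invokes the
 * callback when the reply arrives.
 */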
3194
3195 int
3196 mps_write_config_page(struct mps_softc *sc, struct mps_config_params *params)
3197 {
3198 return (EINVAL);
3199 }
3200
3201 static void
3202 mps_config_complete(struct mps_softc *sc, struct mps_command *cm)
3203 {
3204 MPI2_CONFIG_REPLY *reply;
3205 struct mps_config_params *params;
3206
3207 MPS_FUNCTRACE(sc);
3208 params = cm->cm_complete_data;
3209
3210 if (cm->cm_data != NULL) {
3211 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
3212 BUS_DMASYNC_POSTREAD);
3213 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
3214 }
3215
3216 /*
3217 * XXX KDM need to do more error recovery? This results in the
3218 * device in question not getting probed.
3219 */
3220 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) {
3221 params->status = MPI2_IOCSTATUS_BUSY;
3222 goto done;
3223 }
3224
3225 reply = (MPI2_CONFIG_REPLY *)cm->cm_reply;
3226 if (reply == NULL) {
3227 params->status = MPI2_IOCSTATUS_BUSY;
3228 goto done;
3229 }
3230 params->status = reply->IOCStatus;
3231 if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) {
3232 params->hdr.Ext.ExtPageType = reply->ExtPageType;
3233 params->hdr.Ext.ExtPageLength = reply->ExtPageLength;
3234 params->hdr.Ext.PageType = reply->Header.PageType;
3235 params->hdr.Ext.PageNumber = reply->Header.PageNumber;
3236 params->hdr.Ext.PageVersion = reply->Header.PageVersion;
3237 } else {
3238 params->hdr.Struct.PageType = reply->Header.PageType;
3239 params->hdr.Struct.PageNumber = reply->Header.PageNumber;
3240 params->hdr.Struct.PageLength = reply->Header.PageLength;
3241 params->hdr.Struct.PageVersion = reply->Header.PageVersion;
3242 }
3243
3244 done:
3245 mps_free_command(sc, cm);
3246 if (params->callback != NULL)
3247 params->callback(sc, params);
3248
3249 return;
3250 }
3251